Compare commits

517 commits, comparing RELEASE.20... with RELEASE.20...
Commit SHA1s:

6dd26b8231 e098852a80 1e82c4a7c4 ce960565b1 755e675d5c b6c00405ec
8f62935448 396d78352d 8a405cab2f 6d778a883f a51781e5cf 90213ff1b2
6f764a8efd df35d7db9d 4ba77a916d f9fecf0e76 9f9e0fe085 ee5b3622a5
14544d8d84 118270d76f fef5416b3c 9f87283cd5 b8955fe577 13c3b8afe2
a8cd70f3e5 082f777281 9600e2b35e f75f707ff4 40b8d11209 c1b3f1994b
18c4ecbbef dd52e5ebe9 8af1f0cc7b 85e939636f d203e7e1cc 4aa9ee153b
5fb813a5cc 817269475f 2d168b532b fd4e15c116 3dfbe0f68c 30135eed86
e4081aee62 df418a2783 9a65f6dc97 f04f8bbc78 ea6d61ab1f 6f08edfb36
e9fdea05c6 e005910051 de2c106386 32a6dd1dd6 432aec73d9 36dae04671
9dc9f03c02 b18c0478e7 2d9860e875 d3553f8dfc e1ae90c12b 34e7259f95
d0015b4d66 3467460456 64b5701971 fad59da29d 91c839ad28 2786055df4
0a28c28a8c 526546d588 2053b3414f ce870466ff 964e354d06 8ee8ad777c
82af0be1aa e8c18bc145 bd25f31100 ee7dcc2903 55ef51a99d dc2348daa5
042d7f25e4 8c1b649b2d f03ccec912 0bb65f84bb 8e0910ab3e 5353edcc38
3265112d04 4fdacb8b14 267f183fc8 3d22a9d84f 51ec61ee94 74c2048ea9
730ac5381c 6dd8a83c5a 1a7e6d4768 98c950aacd 94c52e3816 8766c5eb22
e0d22359e7 6dd13e68c2 633001c8ba e23a42305c 63d2583e91 a2f66abbe8
b28661b673 e8791ae274 309975d477 6571641735 8757c963ba 9a71f2fdfa
9c26fe47b0 f3f47d8cd3 de1d39e436 ed1275a063 a7d407fa42 4e6e05f8e0
b0deea27df e98d89274f c59206bcd3 7f2d439baa 5a80cbec2a 2d19011a1d
e82dcd195c fcb56d864c 75cd4201b0 f24c017e9a b5280ba243 2a0e4b6f58
1898961ce3 236796ebd6 4e4f855b30 2db22deb93 fb8d0d7cf7 a536cf5dc0
b9b68e9331 632022971b def04f01cf bc67410548 7881791a91 d2f8f8c7ee
8c32311b80 9bb88e610e d1e41695fe 2aeb3fbe86 99b843a64e 4f31a9a33b
65ddff8899 e7c902bbbc 5a5895203b 7da0336ac8 3be616de3f c5bf22fd90
7c9f934875 b6f9b24b30 13cb814a0e d264d2c899 bebaff269c 52b159b1db
324834e4da c2ed1347d9 50f6f9fe58 48cb0ea34b 6f7c99a333 3498f5b0ec
21d8c0fd13 79b9a9ce46 b9b353db4b 11a9b317a3 9af7d627ac 76d9d54603
4c7c571875 313ba74b09 3e124315c8 78a0fd951e 40852801ea f6980c4630
8fcc787cba 4e6d3c093f 61145361fd 950b4ad9af 6add646130 20e61fb362
18ced1102c d6af3c1237 5549a44566 e7af31c2ff e7971b1d55 26120d7838
bef7c01c58 6a8ccc5925 d85199e9de 8608a84c23 b7226f4c82 f930ffe9e2
b50a245208 45bb11e020 b65cf281fd f781548b0c 25ee8e74f7 c975d2cc7e
ea66528739 e1164103d4 83fe70f710 12a6523fb2 dba61867e8 dd092f6c2b
2a810c7da2 dd8c2aa5c6 9e3fce441e d4265f9a13 2fc024e880 eddf468aef
b0d04b9a81 a9de303d8b 69bd6df464 bfb505aa8e d732b1ff9d ef517bd0da
32d837cf88 7b579caf68 272b8003d6 1c24c93f73 712abc7958 5f6d717b7a
7e1661f4fa f9779b24ad f1f23f6f11 cf26c937e4 f19f957668 d6572879a8
b6ab8f50fa c82acc599a 2447bb58dd a55a298e00 2929c1832d aa2d8583ad
df2d75a2a3 b24b320807 c872c1f1dc a40610d331 38978eb2aa ca7c3a3278
d58fc68137 71c66464c1 bf414068a3 88959ce600 572719872d bdea19b583
eb1f9c9916 d07fb41fe8 a9cda850ca bef0318c36 3f19ea98bb c96073f985
d2f240c791 6491dfbbd6 d9cfa5fcd3 89b14639a9 3f744c0361 9ed7fb4916
4280e68de3 f162d7bd97 36990aeafd 9fe51e392b 7e879a45d5 1c911c5f40
bd8dc17b7a 8491a29ec3 81d21850ec c6ec3fdfba 88c3dd49c6 6869f6d9dd
3f643acb99 ea73accefd 555d54371c bab4c90c45 a2fc0b14d6 bf66e9a529
fde8c38638 e6252dee5a ecb042aa1c e29009d347 9631d65552 7b7be66fa1
b99aaab42e 32bd1b31e9 f287b15e71 586466584f c0b4bf0a3e acf46cc3b5
06ef8248c3 7c2ae4eaf7 989d7af9ac 8a6c3aa3cd 62b560510b 0edfb32621
7e0f1eb8b5 b43e8337b1 30040fba45 44cf9ac62f 3b55357045 18d9a20ff6
6590aba6d2 2e81f27d27 ae3c05aa37 cef044178c c998d1ac8c 3457e504cf
7f0cca075a 088c595e01 26b4b466df fdf691fdcc 88c8c2d6cd ef585037a0
362ebdcbab b251454dd6 21c8693d9c 1e7e5e297c c7f180ffa9 b8bd8d6a03
baec331e84 557f382477 23b166b318 81a481e098 5b3090dffc 3ef3fefd54
28e25eac78 b0c9ae7490 143e7fe300 f09e7ca764 fae284d6b9 83d8e01c81
110458cd10 e3eec89d24 54ae364def 16a100b597 cbc5d78a09 d8a2975a68
66d911653f ca6f795504 2af0f11731 c3408f4f04 b92c324254 81bee93b8d
670f9788e3 f187a16962 e031f2b614 c2b7b82ef4 02ad9d6072 ea9408ccbb
307765591d 5d859b2178 223967fd32 274b35154c c05ced08bb c7722fbb1b
f163bed40d b4772849f9 839a758a36 aebfceeafb 83d7ec09c1 8c29f69b00
ce9d36d954 5c765bc63e 6c7c6bec91 ed703c065d 387584356f 1111419d4a
20378821cf ce1bfa6de8 6c26227081 aa4e2b1542 1e5ac39ff3 ec2295c3dc
8cf7b88cc5 48bfebe442 df60b3c733 584cb61bb8 c1798cc89a 3c8fabd116
36e51d0cee 7d0645fb3a 7c339e248a b62ed5dc90 f0641a0406 3d060f8b64
052a7b8eec 9531cddb06 6fe9a613c0 b729a4e83c a0683d3c1f 66fda7a37f
a63bc9254d 14fa0097b0 d99c397746 e3777b1dd9 63c03758e6 ce419c9835
0c2b708484 166e998788 267a0a3dfa 985fd7d4e7 5479a6e33e fb4186f6b9
7571582000 12b4971b70 5c0b98abf0 30d4a2cf53 92bc7caf7a 19202bae81
e7a4512a90 ff822e64ca 7cb87f863e 67d8396af4 8b0cc376f4 9e5c4df106
fd8749f42a 5c13765168 3099af70a3 fd1b8491db 1961f2ef54 631c78e655
e0f8b767ba d0d015361c 81b7e5c7a8 9e32cc283f 1126410e62 2c46214291
d13bd5b9b5 c8c70a3750 882a1a1ccc 8690d62146 85117d554f 4487f70f08
fb27388101 5e7ccc983d 72fa2b4537 5399d91965 53a0bbeb5b 384a862940
029f52880b 5b05df215a a13cd7b7c4 d524924b80 e6d740ce09 cea4f82586
1d6ce115da 06d2dfa31c d547873b17 01721a840a 52f6d5aafc 2211a5f1b8
1ffa6adcd4 add57a6938 dafa5073cb 65e05a06fb 5c9354894b b01e69e08f
8601f29d95 0aee722e3f beb6d40ce6 19db921555 68b9e9e7e7 2d84b02bc4
8b2801bd46 7d7e21aebb bf14e5ce1b d531080b7e a6b8a5487a 6c0d53a1c5
9f14433cbd 50a817e3d3 5a4a57700b 3de5a3157f 50dec08002 e71ef905f9
3d197c1449 65de2d68c0 1103ad2d08 eab947cf42 0fe9e95250 c7946ab9ab
f5df3b4795 f26325c988 7c14cdb60e 0e02328c98 380524ae27 0286e61aee
ff29aed05d 64f2c61813 525c04fd07 efac90461a 71979376b5 5a1ae862a7
6df20734f9 98bce72295 2f1756489e 9719640e34 ce02ab613d 37de2dbd3b
a82500f162 2dede2fdc2 13fbb96736 eabfcea34e a078703214 bd2b22572f
5f69f04909 a1a426e523 a091b1a3ee 556a51120c eb391a53c1 e17e09ea3c
76c423392a 8e6d756e3a a7c9058375 b16e33bcf5 197af49c99 264cc4020f
df88421087
.github/ISSUE_TEMPLATE.md (vendored): 4 changes

@@ -24,6 +24,10 @@
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->

## Regression
<!-- Is this issue a regression? (Yes / No) -->
<!-- If Yes, optionally please include minio version or commit id or PR# that caused this regression, if you have these details. -->

## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version used (`minio version`):
.github/PULL_REQUEST_TEMPLATE.md (vendored): 4 changes

@@ -7,6 +7,10 @@
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. -->

## Regression
<!-- Is this PR fixing a regression? (Yes / No) -->
<!-- If Yes, optionally please include minio version or commit id or PR# that caused this regression, if you have these details. -->

## How Has This Been Tested?
<!--- Please describe in detail how you tested your changes. -->
<!--- Include details of your testing environment, and the tests you ran to -->
.gitignore (vendored): 2 changes

@@ -22,3 +22,5 @@ parts/
prime/
stage/
.sia_temp/
config.json
healthcheck
.travis.yml: 40 changes

@@ -1,30 +1,50 @@
go_import_path: github.com/minio/minio
sudo: required

services:
- docker

dist: trusty

language: go

# this ensures PRs based on a local branch are not built twice
# the downside is that a PR targeting a different branch is not built
# but as a workaround you can add the branch to this list
branches:
only:
- master

matrix:
include:
- os: linux
dist: trusty
sudo: required
env:
- ARCH=x86_64
go: 1.10.1
- CGO_ENABLED=0
go: 1.11.4
script:
- make
- diff -au <(gofmt -s -d cmd) <(printf "")
- diff -au <(gofmt -s -d pkg) <(printf "")
- make test GOFLAGS="-timeout 15m -race -v"
- for d in $(go list ./... | grep -v browser); do CGO_ENABLED=1 go test -v -race --timeout 15m "$d"; done
- make verifiers
- make crosscompile
- make verify
- make coverage
- node --version
- cd browser && yarn && yarn test && cd ..
- os: windows
env:
- ARCH=x86_64
- CGO_ENABLED=0
go: 1.11.4
script:
- go build --ldflags="$(go run buildscripts/gen-ldflags.go)" -o %GOPATH%\bin\minio.exe
- for d in $(go list ./... | grep -v browser); do CGO_ENABLED=1 go test -v -race --timeout 20m "$d"; done
- bash buildscripts/go-coverage.sh

before_script:
# Add an IPv6 config - see the corresponding Travis issue
# https://github.com/travis-ci/travis-ci/issues/8361
- if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6'; fi

before_install:
- nvm install stable
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then nvm install stable ; fi

after_success:
- bash <(curl -s https://codecov.io/bash)
@@ -68,4 +68,4 @@ To remove a dependency
- Run `make pkg-remove PKG=foo/bar` from top-level directory

### What are the coding guidelines for Minio?
``Minio`` is fully conformant with Golang style. Refer: [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project. If you observe offending code, please feel free to send a pull request or ping us on [Slack](slack.minio.io).
``Minio`` is fully conformant with Golang style. Refer: [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project. If you observe offending code, please feel free to send a pull request or ping us on [Slack](https://slack.minio.io).
Dockerfile: 35 changes

@@ -1,34 +1,39 @@
FROM golang:1.10.1-alpine3.7
FROM golang:1.11.4-alpine3.7

LABEL maintainer="Minio Inc <dev@minio.io>"

ENV GOPATH /go
ENV PATH $PATH:$GOPATH/bin
ENV CGO_ENABLED 0

WORKDIR /go/src/github.com/minio/

RUN \
apk add --no-cache git && \
go get -v -d github.com/minio/minio && \
cd /go/src/github.com/minio/minio && \
go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)" && \
go build -ldflags "-s -w" -o /usr/bin/healthcheck dockerscripts/healthcheck.go

FROM alpine:3.7

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key

WORKDIR /go/src/github.com/minio/
EXPOSE 9000

COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck.sh /usr/bin/
COPY --from=0 /go/bin/minio /usr/bin/minio
COPY --from=0 /usr/bin/healthcheck /usr/bin/healthcheck
COPY dockerscripts/docker-entrypoint.sh /usr/bin/

RUN \
apk add --no-cache ca-certificates curl && \
apk add --no-cache --virtual .build-deps git && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
go get -v -d github.com/minio/minio && \
cd /go/src/github.com/minio/minio && \
go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)" && \
rm -rf /go/pkg /go/src /usr/local/go && apk del .build-deps

EXPOSE 9000
apk add --no-cache ca-certificates 'curl>7.61.0' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

HEALTHCHECK --interval=30s --timeout=5s \
CMD /usr/bin/healthcheck.sh
HEALTHCHECK --interval=1m CMD healthcheck

CMD ["minio"]
@@ -1,27 +1,20 @@
FROM golang:1.10.1-alpine3.7
FROM alpine:3.7

LABEL maintainer="Minio Inc <dev@minio.io>"

ENV GOPATH /go
ENV PATH $PATH:$GOPATH/bin
ENV CGO_ENABLED 0
COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck /usr/bin/
COPY minio /usr/bin/

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key

WORKDIR /go/src/github.com/minio/

COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck.sh /usr/bin/

COPY . /go/src/github.com/minio/minio

RUN \
apk add --no-cache ca-certificates curl && \
apk add --no-cache --virtual .build-deps git && \
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
cd /go/src/github.com/minio/minio && \
go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)" && \
rm -rf /go/pkg /go/src /usr/local/go && apk del .build-deps
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/healthcheck

EXPOSE 9000

@@ -29,7 +22,6 @@ ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

HEALTHCHECK --interval=30s --timeout=5s \
CMD /usr/bin/healthcheck.sh
HEALTHCHECK --interval=1m CMD healthcheck

CMD ["minio"]
@@ -1,21 +1,34 @@
FROM golang:1.11.4-alpine3.7

ENV GOPATH /go
ENV CGO_ENABLED 0

WORKDIR /go/src/github.com/minio/

RUN \
apk add --no-cache git && \
go get -v -d github.com/minio/minio && \
cd /go/src/github.com/minio/minio/dockerscripts && \
go build -ldflags "-s -w" -o /usr/bin/healthcheck healthcheck.go

FROM alpine:3.7

LABEL maintainer="Minio Inc <dev@minio.io>"

COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck.sh /usr/bin/
COPY --from=0 /usr/bin/healthcheck /usr/bin/healthcheck
COPY dockerscripts/docker-entrypoint.sh /usr/bin/

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key

RUN \
apk add --no-cache ca-certificates && \
apk add --no-cache --virtual .build-deps curl && \
apk add --no-cache ca-certificates 'curl>7.61.0' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl https://dl.minio.io/server/minio/release/linux-amd64/minio > /usr/bin/minio && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \
chmod +x /usr/bin/healthcheck.sh
chmod +x /usr/bin/healthcheck

EXPOSE 9000

@@ -23,7 +36,6 @@ ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

HEALTHCHECK --interval=30s --timeout=5s \
CMD /usr/bin/healthcheck.sh
HEALTHCHECK --interval=1m CMD healthcheck

CMD ["minio"]
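The Dockerfiles above replace the shell-based healthcheck.sh probe with a small static binary compiled from dockerscripts/healthcheck.go and invoked as HEALTHCHECK --interval=1m CMD healthcheck. The probe's source is not part of this diff; the following is only a minimal sketch of what such a binary might look like, assuming the server answers a liveness endpoint at http://127.0.0.1:9000/minio/health/live (both the path and the port are assumptions here, not facts from the diff):

// healthcheck_sketch.go: hypothetical stand-in for dockerscripts/healthcheck.go.
// Docker's HEALTHCHECK contract: exit 0 means healthy, exit 1 means unhealthy.
package main

import (
	"net/http"
	"os"
	"time"
)

func main() {
	client := &http.Client{Timeout: 5 * time.Second}
	// Assumed liveness endpoint; the real probe may use a different path or port.
	resp, err := client.Get("http://127.0.0.1:9000/minio/health/live")
	if err != nil {
		os.Exit(1)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		os.Exit(1)
	}
	os.Exit(0)
}

A static probe binary avoids depending on a shell or curl inside the container at health-check time, which fits the `-s -w` stripped-build flags used above.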
Dockerfile.simpleci (new file): 49 lines

@@ -0,0 +1,49 @@
#-------------------------------------------------------------
# Stage 1: Build and Unit tests
#-------------------------------------------------------------
FROM golang:1.11.4

COPY . /go/src/github.com/minio/minio
WORKDIR /go/src/github.com/minio/minio

RUN apt-get update && apt-get install -y jq
RUN make
RUN bash -c 'diff -au <(gofmt -s -d cmd) <(printf "")'
RUN bash -c 'diff -au <(gofmt -s -d pkg) <(printf "")'
RUN for d in $(go list ./... | grep -v browser); do go test -v -race --timeout 15m "$d"; done
RUN make verify
RUN make coverage || true

#-------------------------------------------------------------
# Stage 2: Test Frontend
#-------------------------------------------------------------
FROM node:10.15-stretch-slim

COPY browser /minio/browser
WORKDIR /minio/browser

RUN yarn
RUN yarn test

#-------------------------------------------------------------
# Stage 3: Run Gateway Tests
#-------------------------------------------------------------

FROM ubuntu:16.04

COPY --from=0 /go/src/github.com/minio/minio/minio ./minio
COPY buildscripts/gateway-tests.sh ./gateway-tests.sh
RUN apt-get update && apt-get install -y git wget jq curl dnsmasq

RUN wget https://dl.google.com/go/go1.11.4.linux-amd64.tar.gz && \
tar -C /usr/local -xzf go1.11.4.linux-amd64.tar.gz

ENV DEBIAN_FRONTEND noninteractive
ENV LANG C.UTF-8
ENV GOROOT /usr/local/go

RUN mkdir -p /go
ENV GOPATH /go
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH

RUN ./gateway-tests.sh
Makefile: 47 changes

@@ -13,41 +13,33 @@ checks:
@(env bash $(PWD)/buildscripts/checkgopath.sh)

getdeps:
@echo "Installing golint" && go get -u github.com/golang/lint/golint
@echo "Installing gocyclo" && go get -u github.com/fzipp/gocyclo
@echo "Installing deadcode" && go get -u github.com/remyoudompheng/go-misc/deadcode
@echo "Installing golint" && go get -u golang.org/x/lint/golint
@echo "Installing staticcheck" && go get -u honnef.co/go/tools/...
@echo "Installing misspell" && go get -u github.com/client9/misspell/cmd/misspell
@echo "Installing ineffassign" && go get -u github.com/gordonklaus/ineffassign

verifiers: getdeps vet fmt lint cyclo deadcode spelling
crosscompile:
@(env bash $(PWD)/buildscripts/cross-compile.sh)

verifiers: getdeps vet fmt lint staticcheck spelling

vet:
@echo "Running $@"
@go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult cmd
@go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult pkg
@go vet github.com/minio/minio/...

fmt:
@echo "Running $@"
@gofmt -d cmd
@gofmt -d pkg
@gofmt -d cmd/
@gofmt -d pkg/

lint:
@echo "Running $@"
@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd...
@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg...
@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd/...
@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg/...

ineffassign:
staticcheck:
@echo "Running $@"
@${GOPATH}/bin/ineffassign .

cyclo:
@echo "Running $@"
@${GOPATH}/bin/gocyclo -over 100 cmd
@${GOPATH}/bin/gocyclo -over 100 pkg

deadcode:
@echo "Running $@"
@${GOPATH}/bin/deadcode -test $(shell go list ./...) || true
@${GOPATH}/bin/staticcheck github.com/minio/minio/cmd/...
@${GOPATH}/bin/staticcheck github.com/minio/minio/pkg/...

spelling:
@${GOPATH}/bin/misspell -locale US -error `find cmd/`

@@ -60,7 +52,9 @@ spelling:
check: test
test: verifiers build
@echo "Running unit tests"
@go test $(GOFLAGS) -tags kqueue ./...
@CGO_ENABLED=0 go test -tags kqueue ./...

verify: build
@echo "Verifying build"
@(env bash $(PWD)/buildscripts/verify-build.sh)

@@ -71,7 +65,11 @@ coverage: build
# Builds minio locally.
build: checks
@echo "Building minio binary to './minio'"
@CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio
@GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio
@GOFLAGS="" CGO_ENABLED=0 go build --ldflags="-s -w" -o $(PWD)/dockerscripts/healthcheck $(PWD)/dockerscripts/healthcheck.go

docker: build
@docker build -t $(TAG) . -f Dockerfile.dev

pkg-add:
@echo "Adding new package $(PKG)"

@@ -97,6 +95,7 @@ install: build
clean:
@echo "Cleaning up all the generated files"
@find . -name '*.test' | xargs rm -fv
@find . -name '*~' | xargs rm -fv
@rm -rvf minio
@rm -rvf build
@rm -rvf release
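The verifiers target now standardizes on staticcheck (alongside golint and misspell) in place of gocyclo, deadcode, and ineffassign. As an illustration of the class of issue this catches, the snippet below is not from the MinIO tree; it is a deliberately flawed program that staticcheck reports (the check IDs in the comments are those documented by current staticcheck versions):

// staticcheck_demo.go: illustrative only; `staticcheck ./...` flags both lines.
package main

import "fmt"

func main() {
	s := fmt.Sprintf("hello") // S1039: fmt.Sprintf with no formatting verbs
	if s != s {               // SA4000: identical expressions on both sides
		fmt.Println("unreachable")
	}
	fmt.Println(s)
}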
README.md: 11 changes

@@ -17,7 +17,7 @@ docker run -p 9000:9000 minio/minio server /data
docker pull minio/minio:edge
docker run -p 9000:9000 minio/minio:edge server /data
```
Please visit Minio Docker quickstart guide for more [here](https://docs.minio.io/docs/minio-docker-quickstart-guide)
Note: Docker will not display the autogenerated keys unless you start the container with the `-it`(interactive TTY) argument. Generally, it is not recommended to use autogenerated keys with containers. Please visit Minio Docker quickstart guide for more information [here](https://docs.minio.io/docs/minio-docker-quickstart-guide)

## macOS
### Homebrew

@@ -53,6 +53,15 @@ chmod +x minio
./minio server /data
```

| Platform| Architecture | URL|
| ----------| -------- | ------|
|GNU/Linux|ppc64le|https://dl.minio.io/server/minio/release/linux-ppc64le/minio |
```sh
wget https://dl.minio.io/server/minio/release/linux-ppc64le/minio
chmod +x minio
./minio server /data
```

## Microsoft Windows
### Binary Download
| Platform| Architecture | URL|
appveyor.yml: 55 changes (file removed)

@@ -1,55 +0,0 @@
# version format
version: "{build}"

# Operating system (build VM template)
os: Windows Server 2012 R2

# Platform.
platform: x64

clone_folder: c:\gopath\src\github.com\minio\minio

# Environment variables
environment:
GOPATH: c:\gopath
GOROOT: c:\go

# scripts that run after cloning repository
install:
- set PATH=%GOPATH%\bin;%GOROOT%\bin;%PATH%
- go version
- go env
- python --version

# To run your custom scripts instead of automatic MSBuild
build_script:
# Compile
# We need to disable firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648
- ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)'
- appveyor AddCompilationMessage "Starting Compile"
- cd c:\gopath\src\github.com\minio\minio
- go run buildscripts/gen-ldflags.go > temp.txt
- set /p BUILD_LDFLAGS=<temp.txt
- go build -ldflags="%BUILD_LDFLAGS%" -o %GOPATH%\bin\minio.exe
- appveyor AddCompilationMessage "Compile Success"

# To run your custom scripts instead of automatic tests
test_script:
# Unit tests
- ps: Add-AppveyorTest "Unit Tests" -Outcome Running
- mkdir build\coverage
- for /f "" %%G in ('go list github.com/minio/minio/... ^| find /i /v "browser/"') do ( go test -v -timeout 20m -race %%G )
- go test -v -timeout 20m -coverprofile=build\coverage\coverage.txt -covermode=atomic github.com/minio/minio/cmd
- ps: Update-AppveyorTest "Unit Tests" -Outcome Passed

after_test:
- go tool cover -html=build\coverage\coverage.txt -o build\coverage\coverage.html
- ps: Push-AppveyorArtifact build\coverage\coverage.txt
- ps: Push-AppveyorArtifact build\coverage\coverage.html
# Upload coverage report.
- "SET PATH=C:\\Python34;C:\\Python34\\Scripts;%PATH%"
- pip install codecov
- codecov -X gcov -f "build\coverage\coverage.txt"

# to disable deployment
deploy: off
@@ -5,13 +5,13 @@
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Minio Browser</title>
<link rel="stylesheet" href="loader.css" type="text/css">
<link rel="stylesheet" href="/minio/loader.css" type="text/css">
</head>

<body>
<div class="page-load">
<div class="pl-inner">
<img src="logo.svg" alt="">
<img src="/minio/logo.svg" alt="">
</div>
</div>
<div id="root"></div>

@@ -51,6 +51,6 @@
<![endif]-->

<script>currentUiVersion = 'MINIO_UI_VERSION'</script>
<script src="index_bundle.js"></script>
<script src="/minio/index_bundle.js"></script>
</body>
</html>
@@ -22,7 +22,7 @@ export let alertId = 0
export const set = alert => {
const id = alertId++
return (dispatch, getState) => {
if (alert.type !== "danger") {
if (alert.type !== "danger" || alert.autoClear) {
setTimeout(() => {
dispatch({
type: CLEAR,
@@ -67,8 +67,12 @@ const mapDispatchToProps = dispatch => {
return {
fetchBuckets: () => dispatch(actionsBuckets.fetchBuckets()),
setBucketList: buckets => dispatch(actionsBuckets.setList(buckets)),
selectBucket: bucket => dispatch(actionsBuckets.selectBucket(bucket))
selectBucket: (bucket, prefix) =>
dispatch(actionsBuckets.selectBucket(bucket, prefix))
}
}

export default connect(mapStateToProps, mapDispatchToProps)(BucketList)
export default connect(
mapStateToProps,
mapDispatchToProps
)(BucketList)
@@ -14,7 +14,7 @@
* limitations under the License.
*/

import { READ_ONLY, WRITE_ONLY, READ_WRITE } from '../constants'
import { READ_ONLY, WRITE_ONLY, READ_WRITE, NONE } from '../constants'

import React from "react"
import { connect } from "react-redux"

@@ -42,37 +42,40 @@ export class Policy extends React.Component {
render() {
const {policy, prefix} = this.props
let newPrefix = prefix

if (newPrefix === '')
newPrefix = '*'

return (
<div className="pmb-list">
<div className="pmbl-item">
{ newPrefix }
if (policy === NONE) {
return <noscript />
} else {
return (
<div className="pmb-list">
<div className="pmbl-item">
{ newPrefix }
</div>
<div className="pmbl-item">
<select className="form-control"
disabled
value={ policy }>
<option value={ READ_ONLY }>
Read Only
</option>
<option value={ WRITE_ONLY }>
Write Only
</option>
<option value={ READ_WRITE }>
Read and Write
</option>
</select>
</div>
<div className="pmbl-item">
<button className="btn btn-block btn-danger" onClick={ this.removePolicy.bind(this) }>
Remove
</button>
</div>
</div>
<div className="pmbl-item">
<select className="form-control"
disabled
value={ policy }>
<option value={ READ_ONLY }>
Read Only
</option>
<option value={ WRITE_ONLY }>
Write Only
</option>
<option value={ READ_WRITE }>
Read and Write
</option>
</select>
</div>
<div className="pmbl-item">
<button className="btn btn-block btn-danger" onClick={ this.removePolicy.bind(this) }>
Remove
</button>
</div>
</div>
)
)
}
}
}
@@ -17,7 +17,7 @@
import React from "react"
import { shallow, mount } from "enzyme"
import { Policy } from "../Policy"
import { READ_ONLY, WRITE_ONLY, READ_WRITE } from "../../constants"
import { READ_ONLY, WRITE_ONLY, READ_WRITE, NONE } from "../../constants"
import web from "../../web"

jest.mock("../../web", () => ({

@@ -31,6 +31,11 @@ describe("Policy", () => {
shallow(<Policy currentBucket={"bucket"} prefix={"foo"} policy={READ_ONLY} />)
})

it("should not render when policy is listed as 'none'", () => {
const wrapper = shallow(<Policy currentBucket={"bucket"} prefix={"foo"} policy={NONE} />)
expect(wrapper.find(".pmb-list").length).toBe(0)
})

it("should call web.setBucketPolicy and fetchPolicies on submit", () => {
const fetchPolicies = jest.fn()
const wrapper = shallow(
@@ -23,6 +23,7 @@ export const minioBrowserPrefix = p.slice(0, p.indexOf("/", 1))
export const READ_ONLY = "readonly"
export const WRITE_ONLY = "writeonly"
export const READ_WRITE = "readwrite"
export const NONE = "none"

export const SHARE_OBJECT_EXPIRY_DAYS = 5
export const SHARE_OBJECT_EXPIRY_HOURS = 0
@@ -57,7 +57,7 @@ export class ObjectActions extends React.Component {
})
}
render() {
const { object, showShareObjectModal } = this.props
const { object, showShareObjectModal, shareObjectName } = this.props
return (
<Dropdown id={`obj-actions-${object.name}`}>
<Dropdown.Toggle noCaret className="fia-toggle" />

@@ -77,7 +77,8 @@ export class ObjectActions extends React.Component {
<i className="fa fa-trash" />
</a>
</Dropdown.Menu>
{showShareObjectModal && <ShareObjectModal object={object} />}
{(showShareObjectModal && shareObjectName === object.name) &&
<ShareObjectModal object={object} />}
{this.state.showDeleteConfirmation && (
<DeleteObjectConfirmModal
deleteObject={this.deleteObject.bind(this)}

@@ -92,7 +93,8 @@ export class ObjectActions extends React.Component {
const mapStateToProps = (state, ownProps) => {
return {
object: ownProps.object,
showShareObjectModal: state.objects.shareObject.show
showShareObjectModal: state.objects.shareObject.show,
shareObjectName: state.objects.shareObject.object
}
}
@@ -88,8 +88,21 @@ describe("ObjectActions", () => {
object={{ name: "obj1" }}
currentPrefix={"pre1/"}
showShareObjectModal={true}
shareObjectName={"obj1"}
/>
)
expect(wrapper.find("Connect(ShareObjectModal)").length).toBe(1)
})

it("shouldn't render ShareObjectModal when the names of the objects don't match", () => {
const wrapper = shallow(
<ObjectActions
object={{ name: "obj1" }}
currentPrefix={"pre1/"}
showShareObjectModal={true}
shareObjectName={"obj2"}
/>
)
expect(wrapper.find("Connect(ShareObjectModal)").length).toBe(0)
})
})
@@ -19,16 +19,29 @@ import thunk from "redux-thunk"
import * as actionsObjects from "../actions"
import * as alertActions from "../../alert/actions"
import { minioBrowserPrefix } from "../../constants"
import history from "../../history"

jest.mock("../../web", () => ({
LoggedIn: jest.fn(() => true).mockReturnValueOnce(false),
ListObjects: jest.fn(() => {
return Promise.resolve({
objects: [{ name: "test1" }, { name: "test2" }],
istruncated: false,
nextmarker: "test2",
writable: false
})
LoggedIn: jest
.fn(() => true)
.mockReturnValueOnce(true)
.mockReturnValueOnce(false)
.mockReturnValueOnce(true)
.mockReturnValueOnce(true)
.mockReturnValueOnce(false),
ListObjects: jest.fn(({ bucketName }) => {
if (bucketName === "test-deny") {
return Promise.reject({
message: "listobjects is denied"
})
} else {
return Promise.resolve({
objects: [{ name: "test1" }, { name: "test2" }],
istruncated: false,
nextmarker: "test2",
writable: false
})
}
}),
RemoveObject: jest.fn(({ bucketName, objects }) => {
if (!bucketName) {

@@ -160,6 +173,41 @@ describe("Objects actions", () => {
})
})

it("creates objects/RESET_LIST after failing to fetch the objects from bucket with ListObjects denied for LoggedIn users", () => {
const store = mockStore({
buckets: { currentBucket: "test-deny" },
objects: { currentPrefix: "" }
})
const expectedActions = [
{
type: "alert/SET",
alert: {
type: "danger",
message: "listobjects is denied",
id: alertActions.alertId,
autoClear: true
}
},
{
type: "object/RESET_LIST"
}
]
return store.dispatch(actionsObjects.fetchObjects()).then(() => {
const actions = store.getActions()
expect(actions).toEqual(expectedActions)
})
})

it("redirect to login after failing to fetch the objects from bucket for non-LoggedIn users", () => {
const store = mockStore({
buckets: { currentBucket: "test-deny" },
objects: { currentPrefix: "" }
})
return store.dispatch(actionsObjects.fetchObjects()).then(() => {
expect(history.location.pathname.endsWith("/login")).toBeTruthy()
})
})

it("creates objects/SET_SORT_BY and objects/SET_SORT_ORDER when sortObjects is called", () => {
const store = mockStore({
objects: {

@@ -244,7 +292,11 @@ describe("Objects actions", () => {
const expectedActions = [
{
type: "alert/SET",
alert: { type: "danger", message: "Invalid bucket", id: 0 }
alert: {
type: "danger",
message: "Invalid bucket",
id: alertActions.alertId
}
}
]
return store.dispatch(actionsObjects.deleteObject("obj1")).then(() => {

@@ -355,7 +407,7 @@ describe("Objects actions", () => {
store.dispatch(actionsObjects.downloadObject("obj1"))
const url = `${
window.location.origin
}${minioBrowserPrefix}/download/bk1/${encodeURI("pre1/obj1")}?token=''`
}${minioBrowserPrefix}/download/bk1/${encodeURI("pre1/obj1")}?token=`
expect(setLocation).toHaveBeenCalledWith(url)
})
@@ -16,17 +16,15 @@

import web from "../web"
import history from "../history"
import {
sortObjectsByName,
sortObjectsBySize,
sortObjectsByDate
} from "../utils"
import { sortObjectsByName, sortObjectsBySize, sortObjectsByDate } from "../utils"
import { getCurrentBucket } from "../buckets/selectors"
import { getCurrentPrefix, getCheckedList } from "./selectors"
import * as alertActions from "../alert/actions"
import * as bucketActions from "../buckets/actions"
import { minioBrowserPrefix } from "../constants"

export const SET_LIST = "objects/SET_LIST"
export const RESET_LIST = "object/RESET_LIST"
export const APPEND_LIST = "objects/APPEND_LIST"
export const REMOVE = "objects/REMOVE"
export const SET_SORT_BY = "objects/SET_SORT_BY"

@@ -45,6 +43,10 @@ export const setList = (objects, marker, isTruncated) => ({
isTruncated
})

export const resetList = () => ({
type: RESET_LIST
})

export const appendList = (objects, marker, isTruncated) => ({
type: APPEND_LIST,
objects,

@@ -54,47 +56,54 @@ export const appendList = (objects, marker, isTruncated) => ({

export const fetchObjects = append => {
return function(dispatch, getState) {
const {
buckets: { currentBucket },
objects: { currentPrefix, marker }
} = getState()
const {buckets: {currentBucket}, objects: {currentPrefix, marker}} = getState()
if (currentBucket) {
return web
.ListObjects({
bucketName: currentBucket,
prefix: currentPrefix,
marker: append ? marker : ""
})
.then(res => {
let objects = []
if (res.objects) {
objects = res.objects.map(object => {
return {
...object,
name: object.name.replace(currentPrefix, "")
}
})
}
if (append) {
dispatch(appendList(objects, res.nextmarker, res.istruncated))
} else {
dispatch(setList(objects, res.nextmarker, res.istruncated))
dispatch(setSortBy(""))
dispatch(setSortOrder(false))
}
dispatch(setPrefixWritable(res.writable))
})
.catch(err => {
dispatch(alertActions.set({ type: "danger", message: err.message }))
history.push("/login")
})
}
.ListObjects({
bucketName: currentBucket,
prefix: currentPrefix,
marker: append ? marker : ""
})
.then(res => {
let objects = []
if (res.objects) {
objects = res.objects.map(object => {
return {
...object,
name: object.name.replace(currentPrefix, "")
}
})
}
if (append) {
dispatch(appendList(objects, res.nextmarker, res.istruncated))
} else {
dispatch(setList(objects, res.nextmarker, res.istruncated))
dispatch(setSortBy(""))
dispatch(setSortOrder(false))
}
dispatch(setPrefixWritable(res.writable))
})
.catch(err => {
if (web.LoggedIn()) {
dispatch(
alertActions.set({
type: "danger",
message: err.message,
autoClear: true
})
)
dispatch(resetList())
} else {
history.push("/login")
}
})
}
}
}

export const sortObjects = sortBy => {
return function(dispatch, getState) {
const { objects } = getState()
const {objects} = getState()
const sortOrder = objects.sortBy == sortBy ? !objects.sortOrder : true
dispatch(setSortBy(sortBy))
dispatch(setSortOrder(sortOrder))

@@ -194,30 +203,39 @@ export const shareObject = (object, days, hours, minutes) => {
const currentPrefix = getCurrentPrefix(getState())
const objectName = `${currentPrefix}${object}`
const expiry = days * 24 * 60 * 60 + hours * 60 * 60 + minutes * 60
return web
.PresignedGet({
host: location.host,
bucket: currentBucket,
object: objectName,
expiry
})
.then(obj => {
dispatch(showShareObject(object, obj.url))
dispatch(
alertActions.set({
type: "success",
message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`
})
)
})
.catch(err => {
dispatch(
alertActions.set({
type: "danger",
message: err.message
})
)
})
if (web.LoggedIn()) {
return web
.PresignedGet({
host: location.host,
bucket: currentBucket,
object: objectName
})
.then(obj => {
dispatch(showShareObject(object, obj.url))
dispatch(
alertActions.set({
type: "success",
message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`
})
)
})
.catch(err => {
dispatch(
alertActions.set({
type: "danger",
message: err.message
})
)
})
} else {
dispatch(showShareObject(object, `${location.host}` + '/' + `${currentBucket}` + '/' + encodeURI(objectName)))
dispatch(
alertActions.set({
type: "success",
message: `Object shared.`
})
)
}
}
}

@@ -263,7 +281,7 @@ export const downloadObject = object => {
} else {
const url = `${
window.location.origin
}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=''`
}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
window.location = url
}
}

@@ -292,7 +310,7 @@ export const downloadCheckedObjects = () => {
objects: getCheckedList(state)
}
if (!web.LoggedIn()) {
const requestUrl = location.origin + "/minio/zip?token=''"
const requestUrl = location.origin + "/minio/zip?token="
downloadZip(requestUrl, req, dispatch)
} else {
return web

@@ -303,14 +321,13 @@ export const downloadCheckedObjects = () => {
}${minioBrowserPrefix}/zip?token=${res.token}`
downloadZip(requestUrl, req, dispatch)
})
.catch(err =>
dispatch(
alertActions.set({
type: "danger",
message: err.message
})
)
.catch(err => dispatch(
alertActions.set({
type: "danger",
message: err.message
})
)
)
}
}
}

@@ -333,8 +350,7 @@ const downloadZip = (url, req, dispatch) => {
var separator = req.prefix.length > 1 ? "-" : ""

anchor.href = blobUrl
anchor.download =
req.bucketName + separator + req.prefix.slice(0, -1) + ".zip"
anchor.download = req.bucketName + separator + req.prefix.slice(0, -1) + ".zip"

anchor.click()
window.URL.revokeObjectURL(blobUrl)
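In the shareObject change above, presigned URLs are now generated only for logged-in sessions, while anonymous users get a plain public URL. The same presigned-GET mechanism is available programmatically through the minio-go client; a hedged sketch follows, where the endpoint, credentials, bucket, and object names are all placeholders and the 5-day expiry mirrors the browser's SHARE_OBJECT_EXPIRY_DAYS default:

// presign_sketch.go: illustrative minio-go usage; "play.minio.io:9000",
// the keys, "mybucket", and "pre1/obj1" are placeholder values.
package main

import (
	"fmt"
	"log"
	"net/url"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}
	// A presigned GET URL that stops working after 5 days.
	u, err := client.PresignedGetObject("mybucket", "pre1/obj1", 5*24*time.Hour, url.Values{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u.String())
}

A presigned URL embeds a signature scoped to one object and one expiry window, which is why it is reserved for authenticated sessions, whereas the anonymous branch can only hand out the bucket's public path.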
@@ -50,6 +50,13 @@ export default (
marker: action.marker,
isTruncated: action.isTruncated
}
case actionsObjects.RESET_LIST:
return {
...state,
list: [],
marker: "",
isTruncated: false
}
case actionsObjects.APPEND_LIST:
return {
...state,
@@ -109,7 +109,15 @@ div.fesl-row {
/*--------------------------
Icons
----------------------------*/
&[data-type=folder] { .list-type(#a1d6dd, '\f114'); }
&[data-type=folder] {
.list-type(#a1d6dd, '\f114');

.fesl-item-name {
a {
cursor: pointer;
}
}
}
&[data-type=pdf] {.list-type(#fa7775, '\f1c1'); }
&[data-type=zip] { .list-type(#427089, '\f1c6'); }
&[data-type=audio] { .list-type(#009688, '\f1c7'); }
@@ -70,9 +70,9 @@ async.waterfall([
commitId = stdout.replace('\n', '')
if (commitId.length !== 40) throw new Error('commitId invalid : ' + commitId)
assetsFileName = 'ui-assets.go';
var cmd = 'go-bindata-assetfs -pkg browser -nocompress=true production/...'
var cmd = 'go-bindata-assetfs -o bindata_assetfs.go -pkg browser -nocompress=true production/...'
if (!isProduction) {
cmd = 'go-bindata-assetfs -pkg browser -nocompress=true dev/...'
cmd = 'go-bindata-assetfs -o bindata_assetfs.go -pkg browser -nocompress=true dev/...'
}
console.log('Running', cmd)
exec(cmd, cb)
@@ -13,8 +13,7 @@
"setupTestFrameworkScriptFile": "./app/js/jest/setup.js",
"testURL": "https://localhost:8080",
"moduleNameMapper": {
"\\.(jpg|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga)$":
"<rootDir>/app/js/jest/__mocks__/fileMock.js",
"\\.(jpg|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga)$": "<rootDir>/app/js/jest/__mocks__/fileMock.js",
"\\.(css|scss)$": "identity-obj-proxy"
}
},
@@ -40,29 +39,31 @@
"babel-preset-es2015": "^6.14.0",
"babel-preset-react": "^6.11.1",
"babel-register": "^6.14.0",
"copy-webpack-plugin": "^0.3.3",
"copy-webpack-plugin": "^4.6.0",
"css-loader": "^0.23.1",
"enzyme": "^3.3.0",
"enzyme-adapter-react-16": "^1.1.1",
"esformatter": "^0.10.0",
"esformatter-jsx": "^7.4.1",
"esformatter-jsx-ignore": "^1.0.6",
"html-webpack-plugin": "^2.30.1",
"html-webpack-plugin": "^3.2.0",
"jest": "^22.1.4",
"jest-enzyme": "^4.0.2",
"json-loader": "^0.5.4",
"less": "^2.7.1",
"less-loader": "^2.2.3",
"purifycss-webpack-plugin": "^2.0.3",
"less": "^3.9.0",
"less-loader": "^4.1.0",
"purgecss-webpack-plugin": "^1.4.0",
"style-loader": "^0.13.1",
"url-loader": "^0.5.7",
"webpack-dev-server": "^2.11.1"
"webpack-cli": "^3.2.0",
"webpack-dev-server": "^3.1.14"
},
"dependencies": {
"bootstrap": "^3.3.6",
"classnames": "^2.2.3",
"expect": "^1.20.2",
"font-awesome": "^4.7.0",
"glob-all": "^3.1.0",
"history": "^4.7.2",
"humanize": "0.0.9",
"identity-obj-proxy": "^3.0.0",

@@ -89,6 +90,6 @@
"reselect": "^3.0.1",
"superagent": "^3.8.2",
"superagent-es6-promise": "^1.0.0",
"webpack": "^3.10.0"
"webpack": "^4.28.3"
}
}
(One file's diff is suppressed because one or more lines are too long.)
@@ -16,11 +16,13 @@

var webpack = require('webpack')
var path = require('path')
var glob = require('glob-all')
var CopyWebpackPlugin = require('copy-webpack-plugin')
var purify = require("purifycss-webpack-plugin")
var PurgecssPlugin = require('purgecss-webpack-plugin')

var exports = {
context: __dirname,
mode: 'development',
entry: [
path.resolve(__dirname, 'app/index.js')
],

@@ -99,12 +101,11 @@ var exports = {
{from: 'app/index.html'}
]),
new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
new purify({
basePath: __dirname,
paths: [
"app/index.html",
"app/js/*.js"
]
new PurgecssPlugin({
paths: glob.sync([
path.join(__dirname, 'app/index.html'),
path.join(__dirname, 'app/js/*.js')
])
})
]
}
@@ -16,11 +16,13 @@

var webpack = require('webpack')
var path = require('path')
var glob = require('glob-all')
var CopyWebpackPlugin = require('copy-webpack-plugin')
var purify = require("purifycss-webpack-plugin")
var PurgecssPlugin = require('purgecss-webpack-plugin')

var exports = {
context: __dirname,
mode: 'production',
entry: [
path.resolve(__dirname, 'app/index.js')
],

@@ -74,16 +76,12 @@ var exports = {
{from: 'app/img/logo.svg'},
{from: 'app/index.html'}
]),
new webpack.DefinePlugin({
'process.env.NODE_ENV': '"production"'
}),
new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
new purify({
basePath: __dirname,
paths: [
"app/index.html",
"app/js/*.js"
]
new PurgecssPlugin({
paths: glob.sync([
path.join(__dirname, 'app/index.html'),
path.join(__dirname, 'app/js/*.js')
])
})
]
}
browser/yarn.lock: 3591 changes (diff suppressed because it is too large)
@@ -89,11 +89,11 @@ check_minimum_version() {

assert_is_supported_arch() {
case "${ARCH}" in
x86_64 | amd64 | aarch64 | ppc64le | arm* )
x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x )
return
;;
*)
echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*]"
echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x]"
exit 1
esac
}
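The case statement above gates the install script by CPU architecture, now including s390x. Inside a Go program the equivalent gate is a lookup against runtime.GOARCH; note that Go normalizes x86_64 to amd64 and aarch64 to arm64, so the Go-side list is shorter than the shell version's. An illustrative sketch, not code from the repository:

// archcheck_sketch.go: illustrative mirror of assert_is_supported_arch.
package main

import (
	"fmt"
	"os"
	"runtime"
)

func main() {
	supported := map[string]bool{
		"amd64": true, "arm": true, "arm64": true, "ppc64le": true, "s390x": true,
	}
	if !supported[runtime.GOARCH] {
		fmt.Fprintf(os.Stderr, "arch %q is not supported\n", runtime.GOARCH)
		os.Exit(1)
	}
	fmt.Println("arch", runtime.GOARCH, "is supported")
}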
buildscripts/cross-compile.sh (new executable file): 35 lines

@@ -0,0 +1,35 @@
#!/bin/bash

# Enable tracing if set.
[ -n "$BASH_XTRACEFD" ] && set -ex

function _init() {
## All binaries are static; make sure to disable CGO.
export CGO_ENABLED=0

## List of architectures and OS to test cross compilation.
SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64"
}

function _build_and_sign() {
local osarch=$1
IFS=/ read -r -a arr <<<"$osarch"
os="${arr[0]}"
arch="${arr[1]}"
package=$(go list -f '{{.ImportPath}}')
printf -- "--> %15s:%s\n" "${osarch}" "${package}"

# Go build to build the binary.
export GOOS=$os
export GOARCH=$arch
go build -tags kqueue -o /dev/null
}

function main() {
echo "Testing builds for OS/Arch: ${SUPPORTED_OSARCH}"
for each_osarch in ${SUPPORTED_OSARCH}; do
_build_and_sign "${each_osarch}"
done
}

_init && main "$@"
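cross-compile.sh works because GOOS and GOARCH are ordinary environment variables consumed by the go tool; with CGO disabled, setting them is all that cross-compilation requires. A tiny program like the one below (file name illustrative) confirms which target a binary was built for, e.g. after GOOS=freebsd GOARCH=amd64 go build:

// whichtarget_sketch.go: prints the OS/arch this binary was compiled for.
package main

import (
	"fmt"
	"runtime"
)

func main() {
	fmt.Printf("compiled for %s/%s\n", runtime.GOOS, runtime.GOARCH)
}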
buildscripts/gateway-tests.sh (new executable file): 60 lines

@@ -0,0 +1,60 @@
#!/bin/bash
#
# Minio Cloud Storage, (C) 2019 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

set -e
set -E
set -o pipefail

export SERVER_ENDPOINT=127.0.0.1:24240
export ENABLE_HTTPS=0
export ACCESS_KEY=minio
export SECRET_KEY=minio123
export MINIO_ACCESS_KEY=minio
export MINIO_SECRET_KEY=minio123
export AWS_ACCESS_KEY_ID=minio
export AWS_SECRET_ACCESS_KEY=minio123

trap "cat server.log;cat gateway.log" SIGHUP SIGINT SIGTERM

./minio --quiet --json server data --address 127.0.0.1:24242 > server.log &
sleep 3
./minio --quiet --json gateway s3 http://127.0.0.1:24242 --address 127.0.0.1:24240 > gateway.log &
sleep 3

mkdir -p /mint
git clone https://github.com/minio/mint /mint
cd /mint

export MINT_ROOT_DIR=${MINT_ROOT_DIR:-/mint}
export MINT_RUN_CORE_DIR="$MINT_ROOT_DIR/run/core"
export MINT_RUN_SECURITY_DIR="$MINT_ROOT_DIR/run/security"
export WGET="wget --quiet --no-check-certificate"

go get github.com/go-ini/ini

./create-data-files.sh
./preinstall.sh

# install mint app packages
for pkg in "build"/*/install.sh; do
echo "Running $pkg"
$pkg
done

./postinstall.sh

/mint/entrypoint.sh || cat server.log gateway.log fail
@@ -4,7 +4,7 @@ set -e
echo "" > coverage.txt

for d in $(go list ./... | grep -v browser); do
go test -coverprofile=profile.out -covermode=atomic "$d"
CGO_ENABLED=0 go test -v -coverprofile=profile.out -covermode=atomic "$d"
if [ -f profile.out ]; then
cat profile.out >> coverage.txt
rm profile.out
@@ -68,9 +68,41 @@ function start_minio_erasure_sets()
|
||||
echo "$minio_pid"
|
||||
}
|
||||
|
||||
function start_minio_dist_erasure_sets_ipv6()
|
||||
{
|
||||
declare -a minio_pids
|
||||
export MINIO_ACCESS_KEY=$ACCESS_KEY
|
||||
export MINIO_SECRET_KEY=$SECRET_KEY
|
||||
"${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9000.log" 2>&1 &
    minio_pids[0]=$!
"${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9001.log" 2>&1 &
    minio_pids[1]=$!
"${MINIO[@]}" server --address="[::1]:9002" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9002.log" 2>&1 &
    minio_pids[2]=$!
"${MINIO[@]}" server --address="[::1]:9003" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9003.log" 2>&1 &
    minio_pids[3]=$!
"${MINIO[@]}" server --address="[::1]:9004" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9004.log" 2>&1 &
    minio_pids[4]=$!
"${MINIO[@]}" server --address="[::1]:9005" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9005.log" 2>&1 &
    minio_pids[5]=$!
"${MINIO[@]}" server --address="[::1]:9006" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9006.log" 2>&1 &
    minio_pids[6]=$!
"${MINIO[@]}" server --address="[::1]:9007" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9007.log" 2>&1 &
    minio_pids[7]=$!
"${MINIO[@]}" server --address="[::1]:9008" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9008.log" 2>&1 &
    minio_pids[8]=$!
"${MINIO[@]}" server --address="[::1]:9009" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9009.log" 2>&1 &
    minio_pids[9]=$!

    sleep 35
    echo "${minio_pids[@]}"
}

function start_minio_dist_erasure_sets()
{
    declare -a minio_pids
    export MINIO_ACCESS_KEY=$ACCESS_KEY
    export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
    minio_pids[0]=$!
"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
@@ -99,6 +131,8 @@ function start_minio_dist_erasure_sets()
function start_minio_dist_erasure()
{
    declare -a minio_pids
    export MINIO_ACCESS_KEY=$ACCESS_KEY
    export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
    minio_pids[0]=$!
"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
@@ -157,6 +191,34 @@ function run_test_erasure_sets() {
    return "$rv"
}

function run_test_dist_erasure_sets_ipv6()
{
    minio_pids=( $(start_minio_dist_erasure_sets_ipv6) )

    export SERVER_ENDPOINT="[::1]:9000"

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

    for pid in "${minio_pids[@]}"; do
        kill "$pid"
    done
    sleep 3

    if [ "$rv" -ne 0 ]; then
        for i in $(seq 0 9); do
            echo "server$i log:"
            cat "$WORK_DIR/dist-minio-v6-900$i.log"
        done
    fi

    for i in $(seq 0 9); do
        rm -f "$WORK_DIR/dist-minio-v6-900$i.log"
    done

    return "$rv"
}

function run_test_dist_erasure_sets()
{
    minio_pids=( $(start_minio_dist_erasure_sets) )
@@ -233,6 +295,7 @@ function run_test_gateway_s3()
{
    minio_pid="$(start_minio_gateway_s3)"

    export SERVER_ENDPOINT="127.0.0.1:9000"
    export ACCESS_KEY=Q3AM3UQ867SPQQA43P2F
    export SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
@@ -275,6 +338,7 @@ function __init__()
        exit 1
    fi

    sed -i 's|-sS|-sSg|g' "$FUNCTIONAL_TESTS"
    chmod a+x "$FUNCTIONAL_TESTS"
}

@@ -315,6 +379,13 @@ function main()
        exit 1
    fi

    echo "Testing in Distributed Erasure setup as sets with ipv6"
    if ! run_test_dist_erasure_sets_ipv6; then
        echo "FAILED"
        rm -fr "$WORK_DIR"
        exit 1
    fi

    echo "Testing in Gateway S3 setup"
    if ! run_test_gateway_s3; then
        echo "FAILED"

@@ -21,6 +21,7 @@ import (
	"net/http"

	"github.com/gorilla/mux"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/policy"
)

@@ -56,26 +57,28 @@ type accessControlPolicy struct {
func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetBucketACL")

	defer logger.AuditLog(w, r, "GetBucketACL", mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objAPI := api.ObjectAPI()
	if objAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	// Allow getBucketACL if policy action is set, since this is a dummy call
	// we are simply re-purposing the bucketPolicyAction.
	if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Before proceeding validate if bucket exists.
	_, err := objAPI.GetBucketInfo(ctx, bucket)
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -89,7 +92,7 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
		Permission: "FULL_CONTROL",
	})
	if err := xml.NewEncoder(w).Encode(acl); err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -103,27 +106,29 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetObjectACL")

	defer logger.AuditLog(w, r, "GetObjectACL", mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object := vars["object"]

	objAPI := api.ObjectAPI()
	if objAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	// Allow getObjectACL if policy action is set, since this is a dummy call
	// we are simply re-purposing the bucketPolicyAction.
	if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Before proceeding validate if object exists.
	_, err := objAPI.GetObjectInfo(ctx, bucket, object)
	_, err := objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -137,7 +142,7 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.
		Permission: "FULL_CONTROL",
	})
	if err := xml.NewEncoder(w).Encode(acl); err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -20,12 +20,15 @@ import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/madmin"
	"github.com/minio/minio/pkg/sync/errgroup"
)

// healStatusSummary - overall short summary of a healing sequence
@@ -59,9 +62,8 @@ var (
	errHealPushStopNDiscard = fmt.Errorf("heal push stopped due to heal stop signal")
	errHealStopSignalled    = fmt.Errorf("heal stop signaled")

	errFnHealFromAPIErr = func(err error) error {
		errCode := toAPIErrorCode(err)
		apiErr := getAPIError(errCode)
	errFnHealFromAPIErr = func(ctx context.Context, err error) error {
		apiErr := toAPIError(ctx, err)
		return fmt.Errorf("Heal internal error: %s: %s",
			apiErr.Code, apiErr.Description)
	}
@@ -110,6 +112,32 @@ func initAllHealState(isErasureMode bool) {
	globalAllHealState = allHealState{
		healSeqMap: make(map[string]*healSequence),
	}

	go globalAllHealState.periodicHealSeqsClean()
}

func (ahs *allHealState) periodicHealSeqsClean() {
	// Launch clean-up routine to remove this heal sequence (after
	// it ends) from the global state after timeout has elapsed.
	ticker := time.NewTicker(time.Minute * 5)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			now := UTCNow()
			ahs.Lock()
			for path, h := range ahs.healSeqMap {
				if h.hasEnded() && h.endTime.Add(keepHealSeqStateDuration).Before(now) {
					delete(ahs.healSeqMap, path)
				}
			}
			ahs.Unlock()
		case <-GlobalServiceDoneCh:
			// server could be restarting - need
			// to exit immediately
			return
		}
	}
}

// getHealSequence - Retrieve a heal sequence by path. The second
@@ -121,6 +149,35 @@ func (ahs *allHealState) getHealSequence(path string) (h *healSequence, exists b
	return h, exists
}

func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
	var hsp madmin.HealStopSuccess
	he, exists := ahs.getHealSequence(path)
	if !exists {
		hsp = madmin.HealStopSuccess{
			ClientToken: "invalid",
			StartTime:   UTCNow(),
		}
	} else {
		hsp = madmin.HealStopSuccess{
			ClientToken:   he.clientToken,
			ClientAddress: he.clientAddress,
			StartTime:     he.startTime,
		}

		he.stop()
		for !he.hasEnded() {
			time.Sleep(1 * time.Second)
		}
		ahs.Lock()
		defer ahs.Unlock()
		// Heal sequence explicitly stopped, remove it.
		delete(ahs.healSeqMap, path)
	}

	b, err := json.Marshal(&hsp)
	return b, toAdminAPIErr(context.Background(), err)
}

// LaunchNewHealSequence - launches a background routine that performs
// healing according to the healSequence argument. For each heal
// sequence, state is stored in the `globalAllHealState`, which is a
@@ -132,7 +189,7 @@ func (ahs *allHealState) getHealSequence(path string) (h *healSequence, exists b
// background routine to clean up heal results after the
// aforementioned duration.
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
	respBytes []byte, errCode APIErrorCode, errMsg string) {
	respBytes []byte, apiErr APIError, errMsg string) {

	existsAndLive := false
	he, exists := ahs.getHealSequence(h.path)
@@ -141,22 +198,21 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
			existsAndLive = true
		}
	}

	if existsAndLive {
		// A heal sequence exists on the given path.
		if h.forceStarted {
			// stop the running heal sequence - wait for
			// it to finish.
			// stop the running heal sequence - wait for it to finish.
			he.stop()
			for !he.hasEnded() {
				time.Sleep(10 * time.Second)
				time.Sleep(1 * time.Second)
			}
		} else {
			errMsg = "Heal is already running on the given path " +
				"(use force-start option to stop and start afresh). " +
				fmt.Sprintf("The heal was started by IP %s at %s",
					h.clientAddress, h.startTime)

			return nil, ErrHealAlreadyRunning, errMsg
				fmt.Sprintf("The heal was started by IP %s at %s, token is %s",
					h.clientAddress, h.startTime.Format(http.TimeFormat), h.clientToken)
			return nil, errorCodes.ToAPIErr(ErrHealAlreadyRunning), errMsg
		}
	}

@@ -171,7 +227,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (

			errMsg = "The provided heal sequence path overlaps with an existing " +
				fmt.Sprintf("heal path: %s", k)
			return nil, ErrHealOverlappingPaths, errMsg
			return nil, errorCodes.ToAPIErr(ErrHealOverlappingPaths), errMsg
		}
	}

@@ -181,51 +237,16 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
	// Launch top-level background heal go-routine
	go h.healSequenceStart()

	// Launch clean-up routine to remove this heal sequence (after
	// it ends) from the global state after timeout has elapsed.
	go func() {
		var keepStateTimeout <-chan time.Time
		ticker := time.NewTicker(time.Minute)
		defer ticker.Stop()
		everyMinute := ticker.C
		for {
			select {
			// Check every minute if heal sequence has ended.
			case <-everyMinute:
				if h.hasEnded() {
					keepStateTimeout = time.After(keepHealSeqStateDuration)
					everyMinute = nil
				}

			// This case does not fire until the heal
			// sequence completes.
			case <-keepStateTimeout:
				// Heal sequence has ended, keep
				// results state duration has elapsed,
				// so purge state.
				ahs.Lock()
				defer ahs.Unlock()
				delete(ahs.healSeqMap, h.path)
				return

			case <-globalServiceDoneCh:
				// server could be restarting - need
				// to exit immediately
				return
			}
		}
	}()

	b, err := json.Marshal(madmin.HealStartSuccess{
		ClientToken:   h.clientToken,
		ClientAddress: h.clientAddress,
		StartTime:     h.startTime,
	})
	if err != nil {
		logger.LogIf(context.Background(), err)
		return nil, ErrInternalError, ""
		logger.LogIf(h.ctx, err)
		return nil, toAPIError(h.ctx, err), ""
	}
	return b, ErrNone, ""
	return b, noError, ""
}

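A distilled, self-contained sketch of the force-start semantics described in the LaunchNewHealSequence comment above: a registry keyed by heal path that refuses a second live sequence unless the caller forces a restart. All names here are illustrative, not MinIO's own:

package main

import (
	"fmt"
	"sync"
)

type sequence struct{ ended bool }

type registry struct {
	mu   sync.Mutex
	seqs map[string]*sequence
}

func (r *registry) launch(path string, force bool) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if s, ok := r.seqs[path]; ok && !s.ended {
		if !force {
			return fmt.Errorf("heal already running on %s (use force-start)", path)
		}
		s.ended = true // stand-in for he.stop() plus the wait-until-ended loop
	}
	r.seqs[path] = &sequence{}
	return nil
}

func main() {
	r := &registry{seqs: make(map[string]*sequence)}
	fmt.Println(r.launch("bucket/prefix", false)) // <nil>
	fmt.Println(r.launch("bucket/prefix", false)) // error: already running
	fmt.Println(r.launch("bucket/prefix", true))  // <nil>: forced restart
}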
// PopHealStatusJSON - Called by heal-status API. It fetches the heal
@@ -270,7 +291,7 @@ func (ahs *allHealState) PopHealStatusJSON(path string,

	jbytes, err := json.Marshal(h.currentStatus)
	if err != nil {
		logger.LogIf(context.Background(), err)
		logger.LogIf(h.ctx, err)
		return nil, ErrInternalError
	}

@@ -283,12 +304,15 @@ type healSequence struct {
	// bucket, and prefix on which heal seq. was initiated
	bucket, objPrefix string

	// path is just bucket + "/" + objPrefix
	// path is just pathJoin(bucket, objPrefix)
	path string

	// time at which heal sequence was started
	startTime time.Time

	// time at which heal sequence has ended
	endTime time.Time

	// Heal client info
	clientToken, clientAddress string

@@ -328,7 +352,7 @@ func newHealSequence(bucket, objPrefix, clientAddr string,
	return &healSequence{
		bucket:        bucket,
		objPrefix:     objPrefix,
		path:          bucket + "/" + objPrefix,
		path:          pathJoin(bucket, objPrefix),
		startTime:     UTCNow(),
		clientToken:   mustGetUUID(),
		clientAddress: clientAddr,
@@ -383,7 +407,6 @@ func (h *healSequence) stop() {
// sequence automatically resumes. The return value indicates if the
// operation succeeded.
func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {

	// start a timer to keep an upper time limit to find an empty
	// slot to add the given heal result - if no slot is found it
	// means that the server is holding the maximum amount of
@@ -467,6 +490,7 @@ func (h *healSequence) healSequenceStart() {

	select {
	case err, ok := <-h.traverseAndHealDoneCh:
		h.endTime = UTCNow()
		h.currentStatus.updateLock.Lock()
		defer h.currentStatus.updateLock.Unlock()
		// Heal traversal is complete.
@@ -480,6 +504,7 @@ func (h *healSequence) healSequenceStart() {
		}

	case <-h.stopSignalCh:
		h.endTime = UTCNow()
		h.currentStatus.updateLock.Lock()
		h.currentStatus.Summary = healStoppedStatus
		h.currentStatus.FailureDetail = errHealStopSignalled.Error()
@@ -519,6 +544,12 @@ func (h *healSequence) traverseAndHeal() {
	// Start with format healing
	checkErr(h.healDiskFormat)

	// Start healing the config prefix.
	checkErr(h.healMinioSysMeta(minioConfigPrefix))

	// Start healing the bucket config prefix.
	checkErr(h.healMinioSysMeta(bucketConfigPrefix))

	// Heal buckets and objects
	checkErr(h.healBuckets)

@@ -529,9 +560,74 @@ func (h *healSequence) traverseAndHeal() {
	close(h.traverseAndHealDoneCh)
}

// healMinioSysMeta - heals all files under a given meta prefix; it returns a function
// which in turn heals the respective meta directory path and any files in it.
func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
	return func() error {
		// Get current object layer instance.
		objectAPI := newObjectLayerFn()
		if objectAPI == nil {
			return errServerNotInitialized
		}

		// NOTE: Healing on meta is run regardless
		// of any bucket being selected, this is to ensure that
		// meta are always up to date and correct.
		marker := ""
		isTruncated := true
		for isTruncated {
			if globalHTTPServer != nil {
				// Wait at max 1 minute for an in-progress request
				// before proceeding to heal
				waitCount := 60
				// Any requests in progress, delay the heal.
				for globalHTTPServer.GetRequestCount() > 2 && waitCount > 0 {
					waitCount--
					time.Sleep(1 * time.Second)
				}
			}

			// Lists all objects under `config` prefix.
			objectInfos, err := objectAPI.ListObjectsHeal(h.ctx, minioMetaBucket, metaPrefix,
				marker, "", 1000)
			if err != nil {
				return errFnHealFromAPIErr(h.ctx, err)
			}

			for index := range objectInfos.Objects {
				if h.isQuitting() {
					return errHealStopSignalled
				}
				o := objectInfos.Objects[index]
				res, herr := objectAPI.HealObject(h.ctx, o.Bucket, o.Name, h.settings.DryRun, h.settings.Remove)
				// Object might have been deleted by the time heal
				// was attempted; we ignore this file and move on.
				if isErrObjectNotFound(herr) {
					continue
				}
				if herr != nil {
					return herr
				}
				res.Type = madmin.HealItemBucketMetadata
				if err = h.pushHealResultItem(res); err != nil {
					return err
				}
			}

			isTruncated = objectInfos.IsTruncated
			marker = objectInfos.NextMarker
		}
		return nil
	}
}

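healMinioSysMeta returns a func() error rather than running directly, so parameterized heal steps can be driven by the same checkErr helper as zero-argument steps like healDiskFormat. A minimal illustration of that design choice; all names below are placeholders:

package main

import "fmt"

// checkErr mirrors the driver in traverseAndHeal: it runs each step
// and reports a failure (error handling simplified for illustration).
func checkErr(fn func() error) {
	if err := fn(); err != nil {
		fmt.Println("step failed:", err)
	}
}

// healMeta is parameterized, so it returns a closure of type func() error.
func healMeta(prefix string) func() error {
	return func() error {
		fmt.Println("healing prefix", prefix)
		return nil
	}
}

// healFormat takes no arguments, so the function value itself is func() error.
func healFormat() error {
	fmt.Println("healing format.json")
	return nil
}

func main() {
	checkErr(healFormat)          // plain step
	checkErr(healMeta("config/")) // parameterized step via closure
}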
// healDiskFormat - heals format.json, return value indicates if a
// failure error occurred.
func (h *healSequence) healDiskFormat() error {
	if h.isQuitting() {
		return errHealStopSignalled
	}

	// Get current object layer instance.
	objectAPI := newObjectLayerFn()
	if objectAPI == nil {
@@ -542,13 +638,18 @@ func (h *healSequence) healDiskFormat() error {
	// return any error, ignore error returned when disks have
	// already healed.
	if err != nil && err != errNoHealRequired {
		return errFnHealFromAPIErr(err)
		return errFnHealFromAPIErr(h.ctx, err)
	}

	// Healing succeeded, notify the peers to reload format and re-initialize disks.
	// We will notify peers only if healing succeeded.
	if err == nil {
		peersReInitFormat(globalAdminPeers, h.settings.DryRun)
		for _, nerr := range globalNotificationSys.ReloadFormat(h.settings.DryRun) {
			if nerr.Err != nil {
				logger.GetReqInfo(h.ctx).SetTags("peerAddress", nerr.Host.String())
				logger.LogIf(h.ctx, nerr.Err)
			}
		}
	}

	// Push format heal result
@@ -557,6 +658,10 @@ func (h *healSequence) healDiskFormat() error {

// healBuckets - check for all buckets heal or just particular bucket.
func (h *healSequence) healBuckets() error {
	if h.isQuitting() {
		return errHealStopSignalled
	}

	// 1. If a bucket was specified, heal only the bucket.
	if h.bucket != "" {
		return h.healBucket(h.bucket)
@@ -570,7 +675,7 @@ func (h *healSequence) healBuckets() error {

	buckets, err := objectAPI.ListBucketsHeal(h.ctx)
	if err != nil {
		return errFnHealFromAPIErr(err)
		return errFnHealFromAPIErr(h.ctx, err)
	}

	for _, bucket := range buckets {
@@ -584,36 +689,29 @@ func (h *healSequence) healBuckets() error {

// healBucket - traverses and heals given bucket
func (h *healSequence) healBucket(bucket string) error {
	if h.isQuitting() {
		return errHealStopSignalled
	}

	// Get current object layer instance.
	objectAPI := newObjectLayerFn()
	if objectAPI == nil {
		return errServerNotInitialized
	}

	results, err := objectAPI.HealBucket(h.ctx, bucket, h.settings.DryRun)
	// push any available results before checking for error
	for _, result := range results {
		if perr := h.pushHealResultItem(result); perr != nil {
			return perr
		}
	}
	result, err := objectAPI.HealBucket(h.ctx, bucket, h.settings.DryRun, h.settings.Remove)
	// handle heal-bucket error
	if err != nil {
		return err
	}

	if err = h.pushHealResultItem(result); err != nil {
		return err
	}

	if !h.settings.Recursive {
		if h.objPrefix != "" {
			// Check if an object named as the objPrefix exists,
			// and if so heal it.
			_, err = objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix)
			_, err = objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix, ObjectOptions{})
			if err == nil {
				err = h.healObject(bucket, h.objPrefix)
				if err != nil {
				if err = h.healObject(bucket, h.objPrefix); err != nil {
					return err
				}
			}
@@ -622,17 +720,40 @@ func (h *healSequence) healBucket(bucket string) error {
		return nil
	}

	entries := runtime.NumCPU()

	marker := ""
	isTruncated := true
	for isTruncated {
		objectInfos, err := objectAPI.ListObjectsHeal(h.ctx, bucket,
			h.objPrefix, marker, "", 1000)
		if err != nil {
			return errFnHealFromAPIErr(err)
		if globalHTTPServer != nil {
			// Wait at max 1 minute for an in-progress request
			// before proceeding to heal
			waitCount := 60
			// Any requests in progress, delay the heal.
			for globalHTTPServer.GetRequestCount() > 2 && waitCount > 0 {
				waitCount--
				time.Sleep(1 * time.Second)
			}
		}

		for _, o := range objectInfos.Objects {
			if err := h.healObject(o.Bucket, o.Name); err != nil {
		// Heal numCPU * nodes objects at a time.
		objectInfos, err := objectAPI.ListObjectsHeal(h.ctx, bucket,
			h.objPrefix, marker, "", entries)
		if err != nil {
			return errFnHealFromAPIErr(h.ctx, err)
		}

		g := errgroup.WithNErrs(len(objectInfos.Objects))
		for index := range objectInfos.Objects {
			index := index
			g.Go(func() error {
				o := objectInfos.Objects[index]
				return h.healObject(o.Bucket, o.Name)
			}, index)
		}

		for _, err := range g.Wait() {
			if err != nil {
				return err
			}
		}
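The hunk above fans object heals out with minio's pkg/sync/errgroup, which tracks one error slot per index; a standalone sketch of the same pattern (the doubled `index := index` line pins the loop variable for the closure):

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/sync/errgroup"
)

func main() {
	objects := []string{"a", "b", "c"}
	g := errgroup.WithNErrs(len(objects))
	for index := range objects {
		index := index // capture the loop variable for the closure
		g.Go(func() error {
			// stand-in for h.healObject(o.Bucket, o.Name)
			fmt.Println("healing", objects[index])
			return nil
		}, index)
	}
	// Wait returns one error slot per index, in order.
	for i, err := range g.Wait() {
		if err != nil {
			fmt.Println("object", objects[i], "failed:", err)
		}
	}
}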
@@ -655,7 +776,10 @@ func (h *healSequence) healObject(bucket, object string) error {
		return errServerNotInitialized
	}

	hri, err := objectAPI.HealObject(h.ctx, bucket, object, h.settings.DryRun)
	hri, err := objectAPI.HealObject(h.ctx, bucket, object, h.settings.DryRun, h.settings.Remove)
	if isErrObjectNotFound(err) {
		return nil
	}
	if err != nil {
		hri.Detail = err.Error()
	}

@@ -1,5 +1,5 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 * Minio Cloud Storage, (C) 2016, 2017, 2018, 2019 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -31,7 +31,7 @@ type adminAPIHandlers struct {
}

// registerAdminRouter - Add handler functions for each service REST API routes.
func registerAdminRouter(router *mux.Router) {
func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) {

	adminAPI := adminAPIHandlers{}
	// Admin router
@@ -53,26 +53,71 @@ func registerAdminRouter(router *mux.Router) {
	// Info operations
	adminV1Router.Methods(http.MethodGet).Path("/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))

	/// Lock operations
	if globalIsDistXL || globalIsXL {
		/// Heal operations

	// List Locks
	adminV1Router.Methods(http.MethodGet).Path("/locks").HandlerFunc(httpTraceAll(adminAPI.ListLocksHandler))
	// Clear locks
	adminV1Router.Methods(http.MethodDelete).Path("/locks").HandlerFunc(httpTraceAll(adminAPI.ClearLocksHandler))
		// Heal processing endpoint.
		adminV1Router.Methods(http.MethodPost).Path("/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
		adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
		adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))

	/// Heal operations
	/// Health operations

	// Heal processing endpoint.
	adminV1Router.Methods(http.MethodPost).Path("/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
	adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
	adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
	}
	// Performance command - return performance details based on input type
	adminV1Router.Methods(http.MethodGet).Path("/performance").HandlerFunc(httpTraceAll(adminAPI.PerfInfoHandler)).Queries("perfType", "{perfType:.*}")

	// Profiling operations
	adminV1Router.Methods(http.MethodPost).Path("/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
		Queries("profilerType", "{profilerType:.*}")
	adminV1Router.Methods(http.MethodGet).Path("/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))

	/// Config operations
	if enableConfigOps {
	// Update credentials
	adminV1Router.Methods(http.MethodPut).Path("/config/credential").HandlerFunc(httpTraceHdrs(adminAPI.UpdateAdminCredentialsHandler))
	// Get config
	adminV1Router.Methods(http.MethodGet).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
	// Set config
	adminV1Router.Methods(http.MethodPut).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))

	// Update credentials
	adminV1Router.Methods(http.MethodPut).Path("/config/credential").HandlerFunc(httpTraceAll(adminAPI.UpdateCredentialsHandler))
	// Get config
	adminV1Router.Methods(http.MethodGet).Path("/config").HandlerFunc(httpTraceAll(adminAPI.GetConfigHandler))
	// Set config
	adminV1Router.Methods(http.MethodPut).Path("/config").HandlerFunc(httpTraceAll(adminAPI.SetConfigHandler))
		// Get config keys/values
		adminV1Router.Methods(http.MethodGet).Path("/config-keys").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKeysHandler))
		// Set config keys/values
		adminV1Router.Methods(http.MethodPut).Path("/config-keys").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKeysHandler))
	}

	if enableIAMOps {
		// -- IAM APIs --

		// Add policy IAM
		adminV1Router.Methods(http.MethodPut).Path("/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name",
			"{name:.*}")

		// Add user IAM
		adminV1Router.Methods(http.MethodPut).Path("/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
		adminV1Router.Methods(http.MethodPut).Path("/set-user-policy").HandlerFunc(httpTraceHdrs(adminAPI.SetUserPolicy)).
			Queries("accessKey", "{accessKey:.*}").Queries("name", "{name:.*}")
		adminV1Router.Methods(http.MethodPut).Path("/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).
			Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")

		// Remove policy IAM
		adminV1Router.Methods(http.MethodDelete).Path("/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")

		// Remove user IAM
		adminV1Router.Methods(http.MethodDelete).Path("/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")

		// List users
		adminV1Router.Methods(http.MethodGet).Path("/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))

		// List policies
		adminV1Router.Methods(http.MethodGet).Path("/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
	}

	// -- Top APIs --
	// Top locks
	adminV1Router.Methods(http.MethodGet).Path("/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))

	// If none of the routes match, return error.
	adminV1Router.NotFoundHandler = http.HandlerFunc(httpTraceHdrs(notFoundHandlerJSON))
}

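The new enableConfigOps/enableIAMOps parameters gate whole route groups. A minimal gorilla/mux sketch of the same shape, with placeholder handlers standing in for the admin API handlers:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

// stub returns a placeholder handler standing in for an admin API handler.
func stub(name string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, name)
	}
}

// register gates a route group behind a boolean, mirroring the
// enableConfigOps/enableIAMOps parameters above.
func register(router *mux.Router, enableConfigOps bool) {
	api := router.PathPrefix("/minio/admin/v1").Subrouter()

	// Always-on route.
	api.Methods(http.MethodGet).Path("/info").HandlerFunc(stub("info"))

	// Conditionally registered route group.
	if enableConfigOps {
		api.Methods(http.MethodGet).Path("/config").HandlerFunc(stub("get-config"))
		api.Methods(http.MethodPut).Path("/config").HandlerFunc(stub("set-config"))
	}
}

func main() {
	r := mux.NewRouter()
	register(r, false) // e.g. config ops disabled for a gateway
	log.Fatal(http.ListenAndServe(":8080", r))
}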
@@ -1,551 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2014, 2015, 2016, 2017, 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"net"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/minio/minio/cmd/logger"
	xnet "github.com/minio/minio/pkg/net"
)

var errUnsupportedSignal = fmt.Errorf("unsupported signal: only restart and stop signals are supported")

// AdminRPCClient - admin RPC client talks to admin RPC server.
type AdminRPCClient struct {
	*RPCClient
}

// SignalService - calls SignalService RPC.
func (rpcClient *AdminRPCClient) SignalService(signal serviceSignal) (err error) {
	args := SignalServiceArgs{Sig: signal}
	reply := VoidReply{}

	return rpcClient.Call(adminServiceName+".SignalService", &args, &reply)
}

// ReInitFormat - re-initialize disk format, remotely.
func (rpcClient *AdminRPCClient) ReInitFormat(dryRun bool) error {
	args := ReInitFormatArgs{DryRun: dryRun}
	reply := VoidReply{}

	return rpcClient.Call(adminServiceName+".ReInitFormat", &args, &reply)
}

// ListLocks - Sends list locks command to remote server via RPC.
func (rpcClient *AdminRPCClient) ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
	args := ListLocksQuery{
		Bucket:   bucket,
		Prefix:   prefix,
		Duration: duration,
	}
	var reply []VolumeLockInfo

	err := rpcClient.Call(adminServiceName+".ListLocks", &args, &reply)
	return reply, err
}

// ServerInfo - returns the server info of the server to which the RPC call is made.
func (rpcClient *AdminRPCClient) ServerInfo() (sid ServerInfoData, err error) {
	err = rpcClient.Call(adminServiceName+".ServerInfo", &AuthArgs{}, &sid)
	return sid, err
}

// GetConfig - returns config.json of the remote server.
func (rpcClient *AdminRPCClient) GetConfig() ([]byte, error) {
	args := AuthArgs{}
	var reply []byte

	err := rpcClient.Call(adminServiceName+".GetConfig", &args, &reply)
	return reply, err
}

// WriteTmpConfig - writes config file content to a temporary file on a remote node.
func (rpcClient *AdminRPCClient) WriteTmpConfig(tmpFileName string, configBytes []byte) error {
	args := WriteConfigArgs{
		TmpFileName: tmpFileName,
		Buf:         configBytes,
	}
	reply := VoidReply{}

	err := rpcClient.Call(adminServiceName+".WriteTmpConfig", &args, &reply)
	logger.LogIf(context.Background(), err)
	return err
}

// CommitConfig - Move the new config in tmpFileName onto config.json on a remote node.
func (rpcClient *AdminRPCClient) CommitConfig(tmpFileName string) error {
	args := CommitConfigArgs{FileName: tmpFileName}
	reply := VoidReply{}

	err := rpcClient.Call(adminServiceName+".CommitConfig", &args, &reply)
	logger.LogIf(context.Background(), err)
	return err
}

// NewAdminRPCClient - returns new admin RPC client.
func NewAdminRPCClient(host *xnet.Host) (*AdminRPCClient, error) {
	scheme := "http"
	if globalIsSSL {
		scheme = "https"
	}

	serviceURL := &xnet.URL{
		Scheme: scheme,
		Host:   host.String(),
		Path:   adminServicePath,
	}

	var tlsConfig *tls.Config
	if globalIsSSL {
		tlsConfig = &tls.Config{
			ServerName: host.Name,
			RootCAs:    globalRootCAs,
		}
	}

	rpcClient, err := NewRPCClient(
		RPCClientArgs{
			NewAuthTokenFunc: newAuthToken,
			RPCVersion:       globalRPCAPIVersion,
			ServiceName:      adminServiceName,
			ServiceURL:       serviceURL,
			TLSConfig:        tlsConfig,
		},
	)
	if err != nil {
		return nil, err
	}

	return &AdminRPCClient{rpcClient}, nil
}

// adminCmdRunner - abstracts local and remote execution of admin
// commands like service stop and service restart.
type adminCmdRunner interface {
	SignalService(s serviceSignal) error
	ReInitFormat(dryRun bool) error
	ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error)
	ServerInfo() (ServerInfoData, error)
	GetConfig() ([]byte, error)
	WriteTmpConfig(tmpFileName string, configBytes []byte) error
	CommitConfig(tmpFileName string) error
}

// adminPeer - represents an entity that implements admin API RPCs.
type adminPeer struct {
	addr      string
	cmdRunner adminCmdRunner
	isLocal   bool
}

// type alias for a collection of adminPeer.
type adminPeers []adminPeer

// makeAdminPeers - helper function to construct a collection of adminPeer.
func makeAdminPeers(endpoints EndpointList) (adminPeerList adminPeers) {
	localAddr := GetLocalPeer(endpoints)
	if strings.HasPrefix(localAddr, "127.0.0.1:") {
		// Use first IPv4 instead of loopback address.
		localAddr = net.JoinHostPort(sortIPs(localIP4.ToSlice())[0], globalMinioPort)
	}
	adminPeerList = append(adminPeerList, adminPeer{
		addr:      localAddr,
		cmdRunner: localAdminClient{},
		isLocal:   true,
	})

	for _, hostStr := range GetRemotePeers(endpoints) {
		host, err := xnet.ParseHost(hostStr)
		logger.FatalIf(err, "Unable to parse Admin RPC Host", context.Background())
		rpcClient, err := NewAdminRPCClient(host)
		logger.FatalIf(err, "Unable to initialize Admin RPC Client", context.Background())
		adminPeerList = append(adminPeerList, adminPeer{
			addr:      hostStr,
			cmdRunner: rpcClient,
		})
	}

	return adminPeerList
}

// peersReInitFormat - reinitialize remote object layers to new format.
func peersReInitFormat(peers adminPeers, dryRun bool) error {
	errs := make([]error, len(peers))

	// Send ReInitFormat RPC call to all nodes.
	// for local adminPeer this is a no-op.
	wg := sync.WaitGroup{}
	for i, peer := range peers {
		wg.Add(1)
		go func(idx int, peer adminPeer) {
			defer wg.Done()
			if !peer.isLocal {
				errs[idx] = peer.cmdRunner.ReInitFormat(dryRun)
			}
		}(i, peer)
	}
	wg.Wait()
	return nil
}

// Initialize global adminPeer collection.
func initGlobalAdminPeers(endpoints EndpointList) {
	globalAdminPeers = makeAdminPeers(endpoints)
}

// invokeServiceCmd - Invoke Restart/Stop command.
func invokeServiceCmd(cp adminPeer, cmd serviceSignal) (err error) {
	switch cmd {
	case serviceRestart, serviceStop:
		err = cp.cmdRunner.SignalService(cmd)
	}
	return err
}

// sendServiceCmd - Invoke a restart/stop command on the remote peers
// first, followed by the local peer.
func sendServiceCmd(cps adminPeers, cmd serviceSignal) {
	// Send service command like stop or restart to all remote nodes and finally run on local node.
	errs := make([]error, len(cps))
	var wg sync.WaitGroup
	remotePeers := cps[1:]
	for i := range remotePeers {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			// we use idx+1 because the remotePeers slice is shifted one position w.r.t. cps
			errs[idx+1] = invokeServiceCmd(remotePeers[idx], cmd)
		}(i)
	}
	wg.Wait()
	errs[0] = invokeServiceCmd(cps[0], cmd)
}

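sendServiceCmd reserves error slot 0 for the local peer and fans the remote calls out first; a self-contained sketch of that index-shifting pattern (the peer names and call function are illustrative):

package main

import (
	"fmt"
	"sync"
)

// call stands in for invokeServiceCmd.
func call(peer string) error {
	fmt.Println("signal", peer)
	return nil
}

func main() {
	peers := []string{"local", "node2", "node3", "node4"}
	errs := make([]error, len(peers))

	var wg sync.WaitGroup
	remotePeers := peers[1:]
	for i := range remotePeers {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			// idx+1: remotePeers is shifted one position w.r.t. peers,
			// keeping slot 0 of errs reserved for the local peer.
			errs[idx+1] = call(remotePeers[idx])
		}(i)
	}
	wg.Wait()
	errs[0] = call(peers[0]) // local peer runs last
	fmt.Println(errs)
}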
// listPeerLocksInfo - fetch list of locks held on the given bucket,
// matching prefix, held longer than duration, from all peer servers.
func listPeerLocksInfo(peers adminPeers, bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
	// Used to aggregate volume lock information from all nodes.
	allLocks := make([][]VolumeLockInfo, len(peers))
	errs := make([]error, len(peers))
	var wg sync.WaitGroup
	localPeer := peers[0]
	remotePeers := peers[1:]
	for i, remotePeer := range remotePeers {
		wg.Add(1)
		go func(idx int, remotePeer adminPeer) {
			defer wg.Done()
			// `remotePeers` is right-shifted by one position relative to `peers`
			allLocks[idx], errs[idx] = remotePeer.cmdRunner.ListLocks(bucket, prefix, duration)
		}(i+1, remotePeer)
	}
	wg.Wait()
	allLocks[0], errs[0] = localPeer.cmdRunner.ListLocks(bucket, prefix, duration)

	// Summarizing errors received for ListLocks RPC across all
	// nodes. N.B. the possible unavailability of quorum in errors
	// applies only to a distributed setup.
	errCount, err := reduceErrs(errs, []error{})
	if err != nil {
		if errCount >= (len(peers)/2 + 1) {
			return nil, err
		}
		return nil, InsufficientReadQuorum{}
	}

	// Group lock information across nodes by (bucket, object)
	// pair. For readability only.
	paramLockMap := make(map[nsParam][]VolumeLockInfo)
	for _, nodeLocks := range allLocks {
		for _, lockInfo := range nodeLocks {
			param := nsParam{
				volume: lockInfo.Bucket,
				path:   lockInfo.Object,
			}
			paramLockMap[param] = append(paramLockMap[param], lockInfo)
		}
	}
	groupedLockInfos := []VolumeLockInfo{}
	for _, volLocks := range paramLockMap {
		groupedLockInfos = append(groupedLockInfos, volLocks...)
	}
	return groupedLockInfos, nil
}

// uptimeSlice - used to sort uptimes in chronological order.
type uptimeSlice []struct {
	err    error
	uptime time.Duration
}

func (ts uptimeSlice) Len() int {
	return len(ts)
}

func (ts uptimeSlice) Less(i, j int) bool {
	return ts[i].uptime < ts[j].uptime
}

func (ts uptimeSlice) Swap(i, j int) {
	ts[i], ts[j] = ts[j], ts[i]
}

// getPeerUptimes - returns the uptime since the last time read quorum
// was established on success. Otherwise returns InsufficientReadQuorum.
func getPeerUptimes(peers adminPeers) (time.Duration, error) {
	// In a single node Erasure or FS backend setup the uptime of
	// the setup is the uptime of the single minio server
	// instance.
	if !globalIsDistXL {
		return UTCNow().Sub(globalBootTime), nil
	}

	uptimes := make(uptimeSlice, len(peers))

	// Get up time of all servers.
	wg := sync.WaitGroup{}
	for i, peer := range peers {
		wg.Add(1)
		go func(idx int, peer adminPeer) {
			defer wg.Done()
			serverInfoData, rpcErr := peer.cmdRunner.ServerInfo()
			uptimes[idx].uptime, uptimes[idx].err = serverInfoData.Properties.Uptime, rpcErr
		}(i, peer)
	}
	wg.Wait()

	// Sort uptimes in chronological order.
	sort.Sort(uptimes)

	// Pick the readQuorum'th uptime in chronological order, i.e.
	// the time at which read quorum was (re-)established.
	readQuorum := len(uptimes) / 2
	validCount := 0
	latestUptime := time.Duration(0)
	for _, uptime := range uptimes {
		if uptime.err != nil {
			logger.LogIf(context.Background(), uptime.err)
			continue
		}

		validCount++
		if validCount >= readQuorum {
			latestUptime = uptime.uptime
			break
		}
	}

	// Fewer than readQuorum "Admin.Uptime" RPC calls returned
	// successfully, so read quorum is unavailable.
	if validCount < readQuorum {
		return time.Duration(0), InsufficientReadQuorum{}
	}

	return latestUptime, nil
}

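getPeerUptimes picks the readQuorum'th smallest uptime, i.e. the moment read quorum was last re-established. A standalone sketch of that arithmetic with illustrative durations:

package main

import (
	"fmt"
	"sort"
	"time"
)

// quorumUptime mirrors the selection above: sort ascending and take the
// readQuorum'th smallest value, the time since read quorum last held.
func quorumUptime(uptimes []time.Duration) (time.Duration, bool) {
	readQuorum := len(uptimes) / 2
	if readQuorum == 0 {
		return 0, false
	}
	sort.Slice(uptimes, func(i, j int) bool { return uptimes[i] < uptimes[j] })
	return uptimes[readQuorum-1], true
}

func main() {
	// Four peers; the node restarted 5 minutes ago resets the quorum
	// clock only up to the point where quorum still held without it.
	ups := []time.Duration{90 * time.Minute, 5 * time.Minute, 80 * time.Minute, 85 * time.Minute}
	fmt.Println(quorumUptime(ups)) // 1h20m0s true
}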
// getPeerConfig - Fetches config.json from all nodes in the setup and
|
||||
// returns the one that occurs in a majority of them.
|
||||
func getPeerConfig(peers adminPeers) ([]byte, error) {
|
||||
if !globalIsDistXL {
|
||||
return peers[0].cmdRunner.GetConfig()
|
||||
}
|
||||
|
||||
errs := make([]error, len(peers))
|
||||
configs := make([][]byte, len(peers))
|
||||
|
||||
// Get config from all servers.
|
||||
wg := sync.WaitGroup{}
|
||||
for i, peer := range peers {
|
||||
wg.Add(1)
|
||||
go func(idx int, peer adminPeer) {
|
||||
defer wg.Done()
|
||||
configs[idx], errs[idx] = peer.cmdRunner.GetConfig()
|
||||
}(i, peer)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Find the maximally occurring config among peers in a
|
||||
// distributed setup.
|
||||
|
||||
serverConfigs := make([]serverConfig, len(peers))
|
||||
for i, configBytes := range configs {
|
||||
if errs[i] != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Unmarshal the received config files.
|
||||
err := json.Unmarshal(configBytes, &serverConfigs[i])
|
||||
if err != nil {
|
||||
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", peers[i].addr)
|
||||
ctx := logger.SetReqInfo(context.Background(), reqInfo)
|
||||
logger.LogIf(ctx, err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
configJSON, err := getValidServerConfig(serverConfigs, errs)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return the config.json that was present quorum or more
|
||||
// number of disks.
|
||||
return json.Marshal(configJSON)
|
||||
}
|
||||
|
||||
// getValidServerConfig - finds the server config that is present in
|
||||
// quorum or more number of servers.
|
||||
func getValidServerConfig(serverConfigs []serverConfig, errs []error) (scv serverConfig, e error) {
|
||||
// majority-based quorum
|
||||
quorum := len(serverConfigs)/2 + 1
|
||||
|
||||
// Count the number of disks a config.json was found in.
|
||||
configCounter := make([]int, len(serverConfigs))
|
||||
|
||||
// We group equal serverConfigs by the lowest index of the
|
||||
// same value; e.g, let us take the following serverConfigs
|
||||
// in a 4-node setup,
|
||||
// serverConfigs == [c1, c2, c1, c1]
|
||||
// configCounter == [3, 1, 0, 0]
|
||||
// c1, c2 are the only distinct values that appear. c1 is
|
||||
// identified by 0, the lowest index it appears in and c2 is
|
||||
// identified by 1. So, we need to find the number of times
|
||||
// each of these distinct values occur.
|
||||
|
||||
// Invariants:
|
||||
|
||||
// 1. At the beginning of the i-th iteration, the number of
|
||||
// unique configurations seen so far is equal to the number of
|
||||
// non-zero counter values in config[:i].
|
||||
|
||||
// 2. At the beginning of the i-th iteration, the sum of
|
||||
// elements of configCounter[:i] is equal to the number of
|
||||
// non-error configurations seen so far.
|
||||
|
||||
// For each of the serverConfig ...
|
||||
for i := range serverConfigs {
|
||||
// Skip nodes where getConfig failed.
|
||||
if errs[i] != nil {
|
||||
continue
|
||||
}
|
||||
// Check if it is equal to any of the configurations
|
||||
// seen so far. If j == i is reached then we have an
|
||||
// unseen configuration.
|
||||
for j := 0; j <= i; j++ {
|
||||
if j < i && configCounter[j] == 0 {
|
||||
// serverConfigs[j] is known to be
|
||||
// equal to a value that was already
|
||||
// seen. See example above for
|
||||
// clarity.
|
||||
continue
|
||||
} else if j < i && serverConfigs[i].ConfigDiff(&serverConfigs[j]) == "" {
|
||||
// serverConfigs[i] is equal to
|
||||
// serverConfigs[j], update
|
||||
// serverConfigs[j]'s counter since it
|
||||
// is the lower index.
|
||||
configCounter[j]++
|
||||
break
|
||||
} else if j == i {
|
||||
// serverConfigs[i] is equal to no
|
||||
// other value seen before. It is
|
||||
// unique so far.
|
||||
configCounter[i] = 1
|
||||
break
|
||||
} // else invariants specified above are violated.
|
||||
}
|
||||
}
|
||||
|
||||
// We find the maximally occurring server config and check if
|
||||
// there is quorum.
|
||||
var configJSON serverConfig
|
||||
maxOccurrence := 0
|
||||
for i, count := range configCounter {
|
||||
if maxOccurrence < count {
|
||||
maxOccurrence = count
|
||||
configJSON = serverConfigs[i]
|
||||
}
|
||||
}
|
||||
|
||||
// If quorum nodes don't agree.
|
||||
if maxOccurrence < quorum {
|
||||
return scv, errXLWriteQuorum
|
||||
}
|
||||
|
||||
return configJSON, nil
|
||||
}
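The lowest-index grouping scheme above can be checked with a minimal stand-alone sketch, substituting strings for server configs and plain equality for ConfigDiff(...) == "" (the error-skipping branch is omitted since there are no errors here):

package main

import "fmt"

func main() {
	// Hypothetical stand-ins for serverConfigs in a 4-node setup.
	configs := []string{"c1", "c2", "c1", "c1"}
	counter := make([]int, len(configs))

	for i := range configs {
		for j := 0; j <= i; j++ {
			if j < i && counter[j] == 0 {
				continue // configs[j] was folded into an earlier index.
			} else if j < i && configs[i] == configs[j] {
				counter[j]++ // credit the lowest index holding this value
				break
			} else if j == i {
				counter[i] = 1 // first occurrence of a new value
				break
			}
		}
	}

	fmt.Println(counter) // [3 1 0 0], matching the comment above
}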
// Write config contents into a temporary file on all nodes.
func writeTmpConfigPeers(peers adminPeers, tmpFileName string, configBytes []byte) []error {
	// For a single-node minio server setup.
	if !globalIsDistXL {
		err := peers[0].cmdRunner.WriteTmpConfig(tmpFileName, configBytes)
		return []error{err}
	}

	errs := make([]error, len(peers))

	// Write config into temporary file on all nodes.
	wg := sync.WaitGroup{}
	for i, peer := range peers {
		wg.Add(1)
		go func(idx int, peer adminPeer) {
			defer wg.Done()
			errs[idx] = peer.cmdRunner.WriteTmpConfig(tmpFileName, configBytes)
		}(i, peer)
	}
	wg.Wait()

	// Return errors (if any) encountered while writing the
	// temporary config file.
	return errs
}

// Move config contents from the given temporary file onto config.json
// on all nodes.
func commitConfigPeers(peers adminPeers, tmpFileName string) []error {
	// For a single-node minio server setup.
	if !globalIsDistXL {
		return []error{peers[0].cmdRunner.CommitConfig(tmpFileName)}
	}

	errs := make([]error, len(peers))

	// Rename temporary config file into configDir/config.json on
	// all nodes.
	wg := sync.WaitGroup{}
	for i, peer := range peers {
		wg.Add(1)
		go func(idx int, peer adminPeer) {
			defer wg.Done()
			errs[idx] = peer.cmdRunner.CommitConfig(tmpFileName)
		}(i, peer)
	}
	wg.Wait()

	// Return errors (if any) received during rename.
	return errs
}
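Together, writeTmpConfigPeers and commitConfigPeers form a two-phase update: stage the bytes in a temporary file on every node, then rename into config.json. A hedged sketch of a caller follows; setPeerConfig and the temporary file name are hypothetical, and real callers would typically reduce the per-peer errors against write quorum rather than fail on the first one:

// Hypothetical caller illustrating the stage-then-commit sequence.
func setPeerConfig(peers adminPeers, configBytes []byte) error {
	tmpFileName := "config-staged.json" // illustrative name only

	// Phase 1: stage the new config everywhere.
	for _, err := range writeTmpConfigPeers(peers, tmpFileName, configBytes) {
		if err != nil {
			return err
		}
	}
	// Phase 2: rename into place on every node.
	for _, err := range commitConfigPeers(peers, tmpFileName) {
		if err != nil {
			return err
		}
	}
	return nil
}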
@@ -1,127 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"path"
	"time"

	"github.com/gorilla/mux"
	"github.com/minio/minio/cmd/logger"
	xrpc "github.com/minio/minio/cmd/rpc"
)

const adminServiceName = "Admin"
const adminServiceSubPath = "/admin"

var adminServicePath = path.Join(minioReservedBucketPath, adminServiceSubPath)

// adminRPCReceiver - Admin RPC receiver for admin RPC server.
type adminRPCReceiver struct {
	local *localAdminClient
}

// SignalServiceArgs - provides the signal argument to SignalService RPC.
type SignalServiceArgs struct {
	AuthArgs
	Sig serviceSignal
}

// SignalService - sends a restart or stop signal to the service.
func (receiver *adminRPCReceiver) SignalService(args *SignalServiceArgs, reply *VoidReply) error {
	return receiver.local.SignalService(args.Sig)
}

// ListLocksQuery - wraps ListLocks API's query values to send over RPC.
type ListLocksQuery struct {
	AuthArgs
	Bucket   string
	Prefix   string
	Duration time.Duration
}

// ListLocks - lists locks held by requests handled by this server instance.
func (receiver *adminRPCReceiver) ListLocks(args *ListLocksQuery, reply *[]VolumeLockInfo) (err error) {
	*reply, err = receiver.local.ListLocks(args.Bucket, args.Prefix, args.Duration)
	return err
}

// ServerInfo - returns the server info of this server, as of when the
// object layer was initialized.
func (receiver *adminRPCReceiver) ServerInfo(args *AuthArgs, reply *ServerInfoData) (err error) {
	*reply, err = receiver.local.ServerInfo()
	return err
}

// GetConfig - returns the config.json of this server.
func (receiver *adminRPCReceiver) GetConfig(args *AuthArgs, reply *[]byte) (err error) {
	*reply, err = receiver.local.GetConfig()
	return err
}

// ReInitFormatArgs - provides dry-run information to re-initialize format.json.
type ReInitFormatArgs struct {
	AuthArgs
	DryRun bool
}

// ReInitFormat - re-initializes 'format.json'.
func (receiver *adminRPCReceiver) ReInitFormat(args *ReInitFormatArgs, reply *VoidReply) error {
	return receiver.local.ReInitFormat(args.DryRun)
}

// WriteConfigArgs - wraps the bytes to be written and the temporary file name.
type WriteConfigArgs struct {
	AuthArgs
	TmpFileName string
	Buf         []byte
}

// WriteTmpConfig - writes the supplied config contents onto the
// supplied temporary file.
func (receiver *adminRPCReceiver) WriteTmpConfig(args *WriteConfigArgs, reply *VoidReply) error {
	return receiver.local.WriteTmpConfig(args.TmpFileName, args.Buf)
}

// CommitConfigArgs - wraps the config file name that needs to be
// committed into config.json on this node.
type CommitConfigArgs struct {
	AuthArgs
	FileName string
}

// CommitConfig - renames the temporary file into config.json on this node.
func (receiver *adminRPCReceiver) CommitConfig(args *CommitConfigArgs, reply *VoidReply) error {
	return receiver.local.CommitConfig(args.FileName)
}

// NewAdminRPCServer - returns a new admin RPC server.
func NewAdminRPCServer() (*xrpc.Server, error) {
	rpcServer := xrpc.NewServer()
	if err := rpcServer.RegisterName(adminServiceName, &adminRPCReceiver{&localAdminClient{}}); err != nil {
		return nil, err
	}
	return rpcServer, nil
}

// registerAdminRPCRouter - creates and registers the Admin RPC server and its router.
func registerAdminRPCRouter(router *mux.Router) {
	rpcServer, err := NewAdminRPCServer()
	logger.FatalIf(err, "Unable to initialize Admin RPC Server", context.Background())
	subrouter := router.PathPrefix(minioReservedBucketPath).Subrouter()
	subrouter.Path(adminServiceSubPath).Handler(rpcServer)
}
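For orientation, here is a hedged sketch of how the server side above gets wired at startup. serveAdminRPC and the listen address are illustrative only; mux.NewRouter and registerAdminRPCRouter come from the code above, and MinIO's real server composes many more routers than this:

// Illustrative wiring only.
func serveAdminRPC() error {
	router := mux.NewRouter()
	registerAdminRPCRouter(router) // mounts the Admin RPC handler under the reserved bucket path
	return http.ListenAndServe(":9000", router)
}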
@@ -1,611 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"encoding/json"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"reflect"
	"testing"
	"time"

	xnet "github.com/minio/minio/pkg/net"
)

///////////////////////////////////////////////////////////////////////////////
//
// localAdminClient and AdminRPCClient are adminCmdRunner interface
// compatible, hence the test functions below are available for both clients.
//
///////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////////////////////////////
//
// The Admin RPC server, adminRPCReceiver and AdminRPCClient are
// inter-dependent; the test functions below are sufficient to test all of them.
//
///////////////////////////////////////////////////////////////////////////////
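For reference while reading the tests, the adminCmdRunner method set can be inferred from the call sites below. This reconstruction is an assumption; the real declaration lives elsewhere in package cmd and may differ in detail:

// Inferred from usage in this file; not the authoritative declaration.
type adminCmdRunner interface {
	SignalService(s serviceSignal) error
	ReInitFormat(dryRun bool) error
	ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error)
	ServerInfo() (ServerInfoData, error)
	GetConfig() ([]byte, error)
	WriteTmpConfig(tmpFileName string, configBytes []byte) error
	CommitConfig(tmpFileName string) error
}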
func testAdminCmdRunnerSignalService(t *testing.T, client adminCmdRunner) {
	tmpGlobalServiceSignalCh := globalServiceSignalCh
	globalServiceSignalCh = make(chan serviceSignal, 10)
	defer func() {
		globalServiceSignalCh = tmpGlobalServiceSignalCh
	}()

	testCases := []struct {
		signal    serviceSignal
		expectErr bool
	}{
		{serviceRestart, false},
		{serviceStop, false},
		{serviceStatus, true},
		{serviceSignal(100), true},
	}

	for i, testCase := range testCases {
		err := client.SignalService(testCase.signal)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}

func testAdminCmdRunnerReInitFormat(t *testing.T, client adminCmdRunner) {
	tmpGlobalObjectAPI := globalObjectAPI
	defer func() {
		globalObjectAPI = tmpGlobalObjectAPI
	}()

	testCases := []struct {
		objectAPI ObjectLayer
		dryRun    bool
		expectErr bool
	}{
		{&DummyObjectLayer{}, true, false},
		{&DummyObjectLayer{}, false, false},
		{nil, true, true},
		{nil, false, true},
	}

	for i, testCase := range testCases {
		globalObjectAPI = testCase.objectAPI
		err := client.ReInitFormat(testCase.dryRun)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}

func testAdminCmdRunnerListLocks(t *testing.T, client adminCmdRunner) {
	tmpGlobalObjectAPI := globalObjectAPI
	defer func() {
		globalObjectAPI = tmpGlobalObjectAPI
	}()

	testCases := []struct {
		objectAPI ObjectLayer
		expectErr bool
	}{
		{&DummyObjectLayer{}, false},
		{nil, true},
	}

	for i, testCase := range testCases {
		globalObjectAPI = testCase.objectAPI
		_, err := client.ListLocks("", "", time.Duration(0))
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}

func testAdminCmdRunnerServerInfo(t *testing.T, client adminCmdRunner) {
	tmpGlobalBootTime := globalBootTime
	tmpGlobalObjectAPI := globalObjectAPI
	tmpGlobalConnStats := globalConnStats
	tmpGlobalHTTPStats := globalHTTPStats
	tmpGlobalNotificationSys := globalNotificationSys
	defer func() {
		globalBootTime = tmpGlobalBootTime
		globalObjectAPI = tmpGlobalObjectAPI
		globalConnStats = tmpGlobalConnStats
		globalHTTPStats = tmpGlobalHTTPStats
		globalNotificationSys = tmpGlobalNotificationSys
	}()

	endpoints := new(EndpointList)

	notificationSys := NewNotificationSys(globalServerConfig, *endpoints)

	testCases := []struct {
		bootTime        time.Time
		objectAPI       ObjectLayer
		connStats       *ConnStats
		httpStats       *HTTPStats
		notificationSys *NotificationSys
		expectErr       bool
	}{
		{UTCNow(), &DummyObjectLayer{}, newConnStats(), newHTTPStats(), notificationSys, false},
		{time.Time{}, nil, nil, nil, nil, true},
		{UTCNow(), nil, nil, nil, nil, true},
	}

	for i, testCase := range testCases {
		globalBootTime = testCase.bootTime
		globalObjectAPI = testCase.objectAPI
		globalConnStats = testCase.connStats
		globalHTTPStats = testCase.httpStats
		globalNotificationSys = testCase.notificationSys
		_, err := client.ServerInfo()
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}

func testAdminCmdRunnerGetConfig(t *testing.T, client adminCmdRunner) {
	tmpGlobalServerConfig := globalServerConfig
	defer func() {
		globalServerConfig = tmpGlobalServerConfig
	}()

	config := newServerConfig()

	testCases := []struct {
		config    *serverConfig
		expectErr bool
	}{
		{globalServerConfig, false},
		{config, false},
	}

	for i, testCase := range testCases {
		globalServerConfig = testCase.config
		_, err := client.GetConfig()
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}

func testAdminCmdRunnerWriteTmpConfig(t *testing.T, client adminCmdRunner) {
	tmpConfigDir := configDir
	defer func() {
		configDir = tmpConfigDir
	}()

	tempDir, err := ioutil.TempDir("", ".AdminCmdRunnerWriteTmpConfig.")
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	defer os.RemoveAll(tempDir)
	configDir = &ConfigDir{dir: tempDir}

	testCases := []struct {
		tmpFilename string
		configBytes []byte
		expectErr   bool
	}{
		{"config1.json", []byte(`{"version":"23","region":"us-west-1a"}`), false},
		// Overwrite test.
		{"config1.json", []byte(`{"version":"23","region":"us-west-1a","browser":"on"}`), false},
		{"config2.json", []byte{}, false},
		{"config3.json", nil, false},
	}

	for i, testCase := range testCases {
		err := client.WriteTmpConfig(testCase.tmpFilename, testCase.configBytes)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}

func testAdminCmdRunnerCommitConfig(t *testing.T, client adminCmdRunner) {
	tmpConfigDir := configDir
	defer func() {
		configDir = tmpConfigDir
	}()

	tempDir, err := ioutil.TempDir("", ".AdminCmdRunnerCommitConfig.")
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	defer os.RemoveAll(tempDir)
	configDir = &ConfigDir{dir: tempDir}
	err = ioutil.WriteFile(filepath.Join(tempDir, "config.json"), []byte{}, os.ModePerm)
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}

	err = client.WriteTmpConfig("config1.json", []byte(`{"version":"23","region":"us-west-1a"}`))
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}

	testCases := []struct {
		tmpFilename string
		expectErr   bool
	}{
		{"config1.json", false},
		{"config2.json", true},
	}

	for i, testCase := range testCases {
		err := client.CommitConfig(testCase.tmpFilename)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}

func newAdminRPCHTTPServerClient(t *testing.T) (*httptest.Server, *AdminRPCClient, *serverConfig) {
	rpcServer, err := NewAdminRPCServer()
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}

	httpServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		rpcServer.ServeHTTP(w, r)
	}))

	url, err := xnet.ParseURL(httpServer.URL)
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}

	host, err := xnet.ParseHost(url.Host)
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}

	prevGlobalServerConfig := globalServerConfig
	globalServerConfig = newServerConfig()

	rpcClient, err := NewAdminRPCClient(host)
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}

	return httpServer, rpcClient, prevGlobalServerConfig
}
func TestAdminRPCClientSignalService(t *testing.T) {
	httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
	defer httpServer.Close()
	defer func() {
		globalServerConfig = prevGlobalServerConfig
	}()

	testAdminCmdRunnerSignalService(t, rpcClient)
}

func TestAdminRPCClientReInitFormat(t *testing.T) {
	httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
	defer httpServer.Close()
	defer func() {
		globalServerConfig = prevGlobalServerConfig
	}()

	testAdminCmdRunnerReInitFormat(t, rpcClient)
}

func TestAdminRPCClientListLocks(t *testing.T) {
	httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
	defer httpServer.Close()
	defer func() {
		globalServerConfig = prevGlobalServerConfig
	}()

	testAdminCmdRunnerListLocks(t, rpcClient)
}

func TestAdminRPCClientServerInfo(t *testing.T) {
	httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
	defer httpServer.Close()
	defer func() {
		globalServerConfig = prevGlobalServerConfig
	}()

	testAdminCmdRunnerServerInfo(t, rpcClient)
}

func TestAdminRPCClientGetConfig(t *testing.T) {
	httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
	defer httpServer.Close()
	defer func() {
		globalServerConfig = prevGlobalServerConfig
	}()

	testAdminCmdRunnerGetConfig(t, rpcClient)
}

func TestAdminRPCClientWriteTmpConfig(t *testing.T) {
	httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
	defer httpServer.Close()
	defer func() {
		globalServerConfig = prevGlobalServerConfig
	}()

	testAdminCmdRunnerWriteTmpConfig(t, rpcClient)
}

func TestAdminRPCClientCommitConfig(t *testing.T) {
	httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
	defer httpServer.Close()
	defer func() {
		globalServerConfig = prevGlobalServerConfig
	}()

	testAdminCmdRunnerCommitConfig(t, rpcClient)
}

var (
	config1 = []byte(`{
	"version": "13",
	"credential": {
		"accessKey": "minio",
		"secretKey": "minio123"
	},
	"region": "us-east-1",
	"logger": {
		"console": {
			"enable": true,
			"level": "debug"
		},
		"file": {
			"enable": false,
			"fileName": "",
			"level": ""
		}
	},
	"notify": {
		"amqp": {
			"1": {
				"enable": false,
				"url": "",
				"exchange": "",
				"routingKey": "",
				"exchangeType": "",
				"mandatory": false,
				"immediate": false,
				"durable": false,
				"internal": false,
				"noWait": false,
				"autoDeleted": false
			}
		},
		"nats": {
			"1": {
				"enable": false,
				"address": "",
				"subject": "",
				"username": "",
				"password": "",
				"token": "",
				"secure": false,
				"pingInterval": 0,
				"streaming": {
					"enable": false,
					"clusterID": "",
					"clientID": "",
					"async": false,
					"maxPubAcksInflight": 0
				}
			}
		},
		"elasticsearch": {
			"1": {
				"enable": false,
				"url": "",
				"index": ""
			}
		},
		"redis": {
			"1": {
				"enable": false,
				"address": "",
				"password": "",
				"key": ""
			}
		},
		"postgresql": {
			"1": {
				"enable": false,
				"connectionString": "",
				"table": "",
				"host": "",
				"port": "",
				"user": "",
				"password": "",
				"database": ""
			}
		},
		"kafka": {
			"1": {
				"enable": false,
				"brokers": null,
				"topic": ""
			}
		},
		"webhook": {
			"1": {
				"enable": false,
				"endpoint": ""
			}
		}
	}
}
`)
	// Differs from config1 only in notify.amqp."1".enable, which is true here.
	config2 = []byte(`{
	"version": "13",
	"credential": {
		"accessKey": "minio",
		"secretKey": "minio123"
	},
	"region": "us-east-1",
	"logger": {
		"console": {
			"enable": true,
			"level": "debug"
		},
		"file": {
			"enable": false,
			"fileName": "",
			"level": ""
		}
	},
	"notify": {
		"amqp": {
			"1": {
				"enable": true,
				"url": "",
				"exchange": "",
				"routingKey": "",
				"exchangeType": "",
				"mandatory": false,
				"immediate": false,
				"durable": false,
				"internal": false,
				"noWait": false,
				"autoDeleted": false
			}
		},
		"nats": {
			"1": {
				"enable": false,
				"address": "",
				"subject": "",
				"username": "",
				"password": "",
				"token": "",
				"secure": false,
				"pingInterval": 0,
				"streaming": {
					"enable": false,
					"clusterID": "",
					"clientID": "",
					"async": false,
					"maxPubAcksInflight": 0
				}
			}
		},
		"elasticsearch": {
			"1": {
				"enable": false,
				"url": "",
				"index": ""
			}
		},
		"redis": {
			"1": {
				"enable": false,
				"address": "",
				"password": "",
				"key": ""
			}
		},
		"postgresql": {
			"1": {
				"enable": false,
				"connectionString": "",
				"table": "",
				"host": "",
				"port": "",
				"user": "",
				"password": "",
				"database": ""
			}
		},
		"kafka": {
			"1": {
				"enable": false,
				"brokers": null,
				"topic": ""
			}
		},
		"webhook": {
			"1": {
				"enable": false,
				"endpoint": ""
			}
		}
	}
}
`)
)

// TestGetValidServerConfig - test for getValidServerConfig.
func TestGetValidServerConfig(t *testing.T) {
	var c1, c2 serverConfig
	err := json.Unmarshal(config1, &c1)
	if err != nil {
		t.Fatalf("json unmarshal of %s failed: %v", string(config1), err)
	}

	err = json.Unmarshal(config2, &c2)
	if err != nil {
		t.Fatalf("json unmarshal of %s failed: %v", string(config2), err)
	}

	// Valid config.
	noErrs := []error{nil, nil, nil, nil}
	serverConfigs := []serverConfig{c1, c2, c1, c1}
	validConfig, err := getValidServerConfig(serverConfigs, noErrs)
	if err != nil {
		t.Errorf("Expected a valid config but received %v instead", err)
	}

	if !reflect.DeepEqual(validConfig, c1) {
		t.Errorf("Expected valid config to be %v but received %v", c1, validConfig)
	}

	// Invalid config - no quorum.
	serverConfigs = []serverConfig{c1, c2, c2, c1}
	_, err = getValidServerConfig(serverConfigs, noErrs)
	if err != errXLWriteQuorum {
		t.Errorf("Expected to fail due to lack of quorum but received %v", err)
	}

	// All errors.
	allErrs := []error{errDiskNotFound, errDiskNotFound, errDiskNotFound, errDiskNotFound}
	serverConfigs = []serverConfig{{}, {}, {}, {}}
	_, err = getValidServerConfig(serverConfigs, allErrs)
	if err != errXLWriteQuorum {
		t.Errorf("Expected to fail due to lack of quorum but received %v", err)
	}
}
@@ -23,6 +23,8 @@ import (
const (
	// Response request id.
	responseRequestIDKey = "x-amz-request-id"
	// Deployment id.
	responseDeploymentIDKey = "x-minio-deployment-id"
)

// ObjectIdentifier carries key name for the object to delete.
@@ -22,6 +22,13 @@ import (
	"fmt"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/storage"
	"github.com/aliyun/aliyun-oss-go-sdk/oss"
	"google.golang.org/api/googleapi"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/dns"
	"github.com/minio/minio/pkg/event"
@@ -83,6 +90,7 @@ const (
	ErrNoSuchBucketPolicy
	ErrNoSuchKey
	ErrNoSuchUpload
	ErrNoSuchVersion
	ErrNotImplemented
	ErrPreconditionFailed
	ErrRequestTimeTooSkewed
@@ -127,8 +135,12 @@ const (
	ErrMaximumExpires
	ErrSlowDown
	ErrInvalidPrefixMarker
	ErrBadRequest
	// Add new error codes here.

	// SSE-S3 related API errors
	ErrInvalidEncryptionMethod

	// Server-Side-Encryption (with Customer provided key) related API errors.
	ErrInsecureSSECustomerRequest
	ErrSSEMultipartEncrypted
@@ -140,6 +152,12 @@ const (
	ErrMissingSSECustomerKeyMD5
	ErrSSECustomerKeyMD5Mismatch
	ErrInvalidSSECustomerParameters
	ErrIncompatibleEncryptionMethod
	ErrKMSNotConfigured
	ErrKMSAuthFailure

	ErrNoAccessKey
	ErrInvalidToken

	// Bucket notification related errors.
	ErrEventNotification
@@ -178,14 +196,19 @@ const (
	// new error codes here.

	ErrMalformedJSON
	ErrAdminNoSuchUser
	ErrAdminNoSuchPolicy
	ErrAdminInvalidArgument
	ErrAdminInvalidAccessKey
	ErrAdminInvalidSecretKey
	ErrAdminConfigNoQuorum
	ErrAdminConfigTooLarge
	ErrAdminConfigBadJSON
	ErrAdminConfigDuplicateKeys
	ErrAdminCredentialsMismatch
	ErrInsecureClientRequest
	ErrObjectTampered

	ErrHealNotImplemented
	ErrHealNoSuchProcess
	ErrHealInvalidClientToken
@@ -193,11 +216,112 @@ const (
	ErrHealAlreadyRunning
	ErrHealOverlappingPaths
	ErrIncorrectContinuationToken

	// S3 Select Errors
	ErrEmptyRequestBody
	ErrUnsupportedFunction
	ErrInvalidExpressionType
	ErrBusy
	ErrUnauthorizedAccess
	ErrExpressionTooLong
	ErrIllegalSQLFunctionArgument
	ErrInvalidKeyPath
	ErrInvalidCompressionFormat
	ErrInvalidFileHeaderInfo
	ErrInvalidJSONType
	ErrInvalidQuoteFields
	ErrInvalidRequestParameter
	ErrInvalidDataType
	ErrInvalidTextEncoding
	ErrInvalidDataSource
	ErrInvalidTableAlias
	ErrMissingRequiredParameter
	ErrObjectSerializationConflict
	ErrUnsupportedSQLOperation
	ErrUnsupportedSQLStructure
	ErrUnsupportedSyntax
	ErrUnsupportedRangeHeader
	ErrLexerInvalidChar
	ErrLexerInvalidOperator
	ErrLexerInvalidLiteral
	ErrLexerInvalidIONLiteral
	ErrParseExpectedDatePart
	ErrParseExpectedKeyword
	ErrParseExpectedTokenType
	ErrParseExpected2TokenTypes
	ErrParseExpectedNumber
	ErrParseExpectedRightParenBuiltinFunctionCall
	ErrParseExpectedTypeName
	ErrParseExpectedWhenClause
	ErrParseUnsupportedToken
	ErrParseUnsupportedLiteralsGroupBy
	ErrParseExpectedMember
	ErrParseUnsupportedSelect
	ErrParseUnsupportedCase
	ErrParseUnsupportedCaseClause
	ErrParseUnsupportedAlias
	ErrParseUnsupportedSyntax
	ErrParseUnknownOperator
	ErrParseMissingIdentAfterAt
	ErrParseUnexpectedOperator
	ErrParseUnexpectedTerm
	ErrParseUnexpectedToken
	ErrParseUnexpectedKeyword
	ErrParseExpectedExpression
	ErrParseExpectedLeftParenAfterCast
	ErrParseExpectedLeftParenValueConstructor
	ErrParseExpectedLeftParenBuiltinFunctionCall
	ErrParseExpectedArgumentDelimiter
	ErrParseCastArity
	ErrParseInvalidTypeParam
	ErrParseEmptySelect
	ErrParseSelectMissingFrom
	ErrParseExpectedIdentForGroupName
	ErrParseExpectedIdentForAlias
	ErrParseUnsupportedCallWithStar
	ErrParseNonUnaryAgregateFunctionCall
	ErrParseMalformedJoin
	ErrParseExpectedIdentForAt
	ErrParseAsteriskIsNotAloneInSelectList
	ErrParseCannotMixSqbAndWildcardInSelectList
	ErrParseInvalidContextForWildcardInSelectList
	ErrIncorrectSQLFunctionArgumentType
	ErrValueParseFailure
	ErrEvaluatorInvalidArguments
	ErrIntegerOverflow
	ErrLikeInvalidInputs
	ErrCastFailed
	ErrInvalidCast
	ErrEvaluatorInvalidTimestampFormatPattern
	ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing
	ErrEvaluatorTimestampFormatPatternDuplicateFields
	ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch
	ErrEvaluatorUnterminatedTimestampFormatPatternToken
	ErrEvaluatorInvalidTimestampFormatPatternToken
	ErrEvaluatorInvalidTimestampFormatPatternSymbol
	ErrEvaluatorBindingDoesNotExist
	ErrMissingHeaders
	ErrInvalidColumnIndex

	ErrAdminConfigNotificationTargetsFailed
	ErrAdminProfilerNotEnabled
	ErrInvalidDecompressedSize
	ErrAddUserInvalidArgument
)

type errorCodeMap map[APIErrorCode]APIError

func (e errorCodeMap) ToAPIErr(errCode APIErrorCode) APIError {
	apiErr, ok := e[errCode]
	if !ok {
		return e[ErrInternalError]
	}
	return apiErr
}
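ToAPIErr makes the map total: an unregistered code degrades to the InternalError entry instead of a zero-valued APIError. A hedged usage sketch follows; writeAPIError is hypothetical, and the real handlers emit structured XML error responses rather than plain text:

// Hypothetical handler helper: resolve a code and reply with its
// description and status; unknown codes fall back to InternalError.
func writeAPIError(w http.ResponseWriter, code APIErrorCode) {
	apiErr := errorCodes.ToAPIErr(code)
	http.Error(w, apiErr.Description, apiErr.HTTPStatusCode)
}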
// error code to APIError structure; these fields carry respective
// descriptions for all the error responses.
var errorCodeResponse = map[APIErrorCode]APIError{
var errorCodes = errorCodeMap{
	ErrInvalidCopyDest: {
		Code:           "InvalidRequest",
		Description:    "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.",
@@ -338,6 +462,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		Description:    "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrNoSuchVersion: {
		Code:           "NoSuchVersion",
		Description:    "Indicates that the version ID specified in the request does not match an existing version.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrNotImplemented: {
		Code:           "NotImplemented",
		Description:    "A header you provided implies functionality that is not implemented",
@@ -540,6 +669,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		Description:    "Invalid marker prefix combination",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBadRequest: {
		Code:           "BadRequest",
		Description:    "400 BadRequest",
		HTTPStatusCode: http.StatusBadRequest,
	},

	// FIXME: The actual XML error response also contains the header that is missing from the list of signed header parameters.
	ErrUnsignedHeaders: {
@@ -629,6 +763,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		Description:    "Your metadata headers exceed the maximum allowed metadata size.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidEncryptionMethod: {
		Code:           "InvalidRequest",
		Description:    "The encryption method specified is not supported",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInsecureSSECustomerRequest: {
		Code:           "InvalidRequest",
		Description:    "Requests specifying Server Side Encryption with Customer provided keys must be made over a secure connection.",
@@ -679,6 +818,31 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		Description:    "The provided encryption parameters did not match the ones used originally.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrIncompatibleEncryptionMethod: {
		Code:           "InvalidArgument",
		Description:    "Server side encryption specified with both SSE-C and SSE-S3 headers",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrKMSNotConfigured: {
		Code:           "InvalidArgument",
		Description:    "Server side encryption specified but KMS is not configured",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrKMSAuthFailure: {
		Code:           "InvalidArgument",
		Description:    "Server side encryption specified but KMS authorization failed",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrNoAccessKey: {
		Code:           "AccessDenied",
		Description:    "No AWSAccessKey was presented",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrInvalidToken: {
		Code:           "InvalidTokenId",
		Description:    "The security token included in the request is invalid",
		HTTPStatusCode: http.StatusForbidden,
	},

	/// S3 extensions.
	ErrContentSHA256Mismatch: {
@@ -738,6 +902,21 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		Description:    "The JSON you provided was not well-formed or did not validate against our published format.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminNoSuchUser: {
		Code:           "XMinioAdminNoSuchUser",
		Description:    "The specified user does not exist.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrAdminNoSuchPolicy: {
		Code:           "XMinioAdminNoSuchPolicy",
		Description:    "The canned policy does not exist.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrAdminInvalidArgument: {
		Code:           "XMinioAdminInvalidArgument",
		Description:    "Invalid arguments specified.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminInvalidAccessKey: {
		Code:           "XMinioAdminInvalidAccessKey",
		Description:    "The access key is invalid.",
@@ -756,14 +935,29 @@ var errorCodeResponse = map[APIErrorCode]APIError{
	ErrAdminConfigTooLarge: {
		Code: "XMinioAdminConfigTooLarge",
		Description: fmt.Sprintf("Configuration data provided exceeds the allowed maximum of %d bytes",
			maxConfigJSONSize),
			maxEConfigJSONSize),
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminConfigBadJSON: {
		Code:           "XMinioAdminConfigBadJSON",
		Description:    "JSON configuration provided is of incorrect format",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminConfigDuplicateKeys: {
		Code:           "XMinioAdminConfigDuplicateKeys",
		Description:    "JSON configuration provided has objects with duplicate keys",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminConfigNotificationTargetsFailed: {
		Code:           "XMinioAdminNotificationTargetsTestFailed",
		Description:    "Configuration update failed due to an unsuccessful attempt to connect to one or more notification servers",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminProfilerNotEnabled: {
		Code:           "XMinioAdminProfilerNotEnabled",
		Description:    "Unable to perform the requested operation because profiling is not enabled",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminCredentialsMismatch: {
		Code:        "XMinioAdminCredentialsMismatch",
		Description: "Credentials in config mismatch with server environment variables",
@@ -794,6 +988,7 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		Description:    "X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds",
		HTTPStatusCode: http.StatusBadRequest,
	},

	// Generic Invalid-Request error. Should be used for response errors only for unlikely
	// corner case errors for which introducing a new APIErrorCode is not worth it. LogIf()
	// should be used to log the error at the source of the error for debugging purposes.
@@ -842,21 +1037,460 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		Description:    "The continuation token provided is incorrect",
		HTTPStatusCode: http.StatusBadRequest,
	},

	// S3 Select API Errors
	ErrEmptyRequestBody: {
		Code:           "EmptyRequestBody",
		Description:    "Request body cannot be empty.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrUnsupportedFunction: {
		Code:           "UnsupportedFunction",
		Description:    "Encountered an unsupported SQL function.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidDataSource: {
		Code:           "InvalidDataSource",
		Description:    "Invalid data source type. Only CSV and JSON are supported at this time.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidExpressionType: {
		Code:           "InvalidExpressionType",
		Description:    "The ExpressionType is invalid. Only SQL expressions are supported at this time.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBusy: {
		Code:           "Busy",
		Description:    "The service is unavailable. Please retry.",
		HTTPStatusCode: http.StatusServiceUnavailable,
	},
	ErrUnauthorizedAccess: {
		Code:           "UnauthorizedAccess",
		Description:    "You are not authorized to perform this operation",
		HTTPStatusCode: http.StatusUnauthorized,
	},
	ErrExpressionTooLong: {
		Code:           "ExpressionTooLong",
		Description:    "The SQL expression is too long: The maximum byte-length for the SQL expression is 256 KB.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrIllegalSQLFunctionArgument: {
		Code:           "IllegalSqlFunctionArgument",
		Description:    "Illegal argument was used in the SQL function.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidKeyPath: {
		Code:           "InvalidKeyPath",
		Description:    "Key path in the SQL expression is invalid.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidCompressionFormat: {
		Code:           "InvalidCompressionFormat",
		Description:    "The file is not in a supported compression format. Only GZIP is supported at this time.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidFileHeaderInfo: {
		Code:           "InvalidFileHeaderInfo",
		Description:    "The FileHeaderInfo is invalid. Only NONE, USE, and IGNORE are supported.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidJSONType: {
		Code:           "InvalidJsonType",
		Description:    "The JsonType is invalid. Only DOCUMENT and LINES are supported at this time.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidQuoteFields: {
		Code:           "InvalidQuoteFields",
		Description:    "The QuoteFields is invalid. Only ALWAYS and ASNEEDED are supported.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidRequestParameter: {
		Code:           "InvalidRequestParameter",
		Description:    "The value of a parameter in SelectRequest element is invalid. Check the service API documentation and try again.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidDataType: {
		Code:           "InvalidDataType",
		Description:    "The SQL expression contains an invalid data type.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidTextEncoding: {
		Code:           "InvalidTextEncoding",
		Description:    "Invalid encoding type. Only UTF-8 encoding is supported at this time.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidTableAlias: {
		Code:           "InvalidTableAlias",
		Description:    "The SQL expression contains an invalid table alias.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMissingRequiredParameter: {
		Code:           "MissingRequiredParameter",
		Description:    "The SelectRequest entity is missing a required parameter. Check the service documentation and try again.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrObjectSerializationConflict: {
		Code:           "ObjectSerializationConflict",
		Description:    "The SelectRequest entity can only contain one of CSV or JSON. Check the service documentation and try again.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrUnsupportedSQLOperation: {
		Code:           "UnsupportedSqlOperation",
		Description:    "Encountered an unsupported SQL operation.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrUnsupportedSQLStructure: {
		Code:           "UnsupportedSqlStructure",
		Description:    "Encountered an unsupported SQL structure. Check the SQL Reference.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrUnsupportedSyntax: {
		Code:           "UnsupportedSyntax",
		Description:    "Encountered invalid syntax.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrUnsupportedRangeHeader: {
		Code:           "UnsupportedRangeHeader",
		Description:    "Range header is not supported for this operation.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrLexerInvalidChar: {
		Code:           "LexerInvalidChar",
		Description:    "The SQL expression contains an invalid character.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrLexerInvalidOperator: {
		Code:           "LexerInvalidOperator",
		Description:    "The SQL expression contains an invalid operator.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrLexerInvalidLiteral: {
		Code:           "LexerInvalidLiteral",
		Description:    "The SQL expression contains an invalid literal.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrLexerInvalidIONLiteral: {
		Code:           "LexerInvalidIONLiteral",
		Description:    "The SQL expression contains an invalid ION literal.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedDatePart: {
		Code:           "ParseExpectedDatePart",
		Description:    "Did not find the expected date part in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedKeyword: {
		Code:           "ParseExpectedKeyword",
		Description:    "Did not find the expected keyword in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedTokenType: {
		Code:           "ParseExpectedTokenType",
		Description:    "Did not find the expected token in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpected2TokenTypes: {
		Code:           "ParseExpected2TokenTypes",
		Description:    "Did not find the expected token in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedNumber: {
		Code:           "ParseExpectedNumber",
		Description:    "Did not find the expected number in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedRightParenBuiltinFunctionCall: {
		Code:           "ParseExpectedRightParenBuiltinFunctionCall",
		Description:    "Did not find the expected right parenthesis character in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedTypeName: {
		Code:           "ParseExpectedTypeName",
		Description:    "Did not find the expected type name in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedWhenClause: {
		Code:           "ParseExpectedWhenClause",
		Description:    "Did not find the expected WHEN clause in the SQL expression. CASE is not supported.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnsupportedToken: {
		Code:           "ParseUnsupportedToken",
		Description:    "The SQL expression contains an unsupported token.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnsupportedLiteralsGroupBy: {
		Code:           "ParseUnsupportedLiteralsGroupBy",
		Description:    "The SQL expression contains an unsupported use of GROUP BY.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedMember: {
		Code:           "ParseExpectedMember",
		Description:    "The SQL expression contains an unsupported use of MEMBER.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnsupportedSelect: {
		Code:           "ParseUnsupportedSelect",
		Description:    "The SQL expression contains an unsupported use of SELECT.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnsupportedCase: {
		Code:           "ParseUnsupportedCase",
		Description:    "The SQL expression contains an unsupported use of CASE.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnsupportedCaseClause: {
		Code:           "ParseUnsupportedCaseClause",
		Description:    "The SQL expression contains an unsupported use of CASE.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnsupportedAlias: {
		Code:           "ParseUnsupportedAlias",
		Description:    "The SQL expression contains an unsupported use of ALIAS.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnsupportedSyntax: {
		Code:           "ParseUnsupportedSyntax",
		Description:    "The SQL expression contains unsupported syntax.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnknownOperator: {
		Code:           "ParseUnknownOperator",
		Description:    "The SQL expression contains an invalid operator.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseMissingIdentAfterAt: {
		Code:           "ParseMissingIdentAfterAt",
		Description:    "Did not find the expected identifier after the @ symbol in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnexpectedOperator: {
		Code:           "ParseUnexpectedOperator",
		Description:    "The SQL expression contains an unexpected operator.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnexpectedTerm: {
		Code:           "ParseUnexpectedTerm",
		Description:    "The SQL expression contains an unexpected term.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnexpectedToken: {
		Code:           "ParseUnexpectedToken",
		Description:    "The SQL expression contains an unexpected token.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnexpectedKeyword: {
		Code:           "ParseUnexpectedKeyword",
		Description:    "The SQL expression contains an unexpected keyword.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedExpression: {
		Code:           "ParseExpectedExpression",
		Description:    "Did not find the expected SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedLeftParenAfterCast: {
		Code:           "ParseExpectedLeftParenAfterCast",
		Description:    "Did not find the expected left parenthesis after CAST in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedLeftParenValueConstructor: {
		Code:           "ParseExpectedLeftParenValueConstructor",
		Description:    "Did not find the expected left parenthesis in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedLeftParenBuiltinFunctionCall: {
		Code:           "ParseExpectedLeftParenBuiltinFunctionCall",
		Description:    "Did not find the expected left parenthesis in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedArgumentDelimiter: {
		Code:           "ParseExpectedArgumentDelimiter",
		Description:    "Did not find the expected argument delimiter in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseCastArity: {
		Code:           "ParseCastArity",
		Description:    "The SQL expression CAST has incorrect arity.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseInvalidTypeParam: {
		Code:           "ParseInvalidTypeParam",
		Description:    "The SQL expression contains an invalid parameter value.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseEmptySelect: {
		Code:           "ParseEmptySelect",
		Description:    "The SQL expression contains an empty SELECT.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseSelectMissingFrom: {
		Code:           "ParseSelectMissingFrom",
		Description:    "The SQL expression is missing the FROM clause after SELECT.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedIdentForGroupName: {
		Code:           "ParseExpectedIdentForGroupName",
		Description:    "GROUP is not supported in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedIdentForAlias: {
		Code:           "ParseExpectedIdentForAlias",
		Description:    "Did not find the expected identifier for the alias in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnsupportedCallWithStar: {
		Code:           "ParseUnsupportedCallWithStar",
		Description:    "Only COUNT with (*) as a parameter is supported in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseNonUnaryAgregateFunctionCall: {
		Code:           "ParseNonUnaryAgregateFunctionCall",
		Description:    "Only one argument is supported for aggregate functions in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseMalformedJoin: {
		Code:           "ParseMalformedJoin",
		Description:    "JOIN is not supported in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedIdentForAt: {
		Code:           "ParseExpectedIdentForAt",
		Description:    "Did not find the expected identifier for AT name in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseAsteriskIsNotAloneInSelectList: {
		Code:           "ParseAsteriskIsNotAloneInSelectList",
		Description:    "Other expressions are not allowed in the SELECT list when '*' is used without dot notation in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseCannotMixSqbAndWildcardInSelectList: {
		Code:           "ParseCannotMixSqbAndWildcardInSelectList",
		Description:    "Cannot mix [] and * in the same expression in a SELECT list in SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseInvalidContextForWildcardInSelectList: {
		Code:           "ParseInvalidContextForWildcardInSelectList",
		Description:    "Invalid use of * in SELECT list in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrIncorrectSQLFunctionArgumentType: {
		Code:           "IncorrectSqlFunctionArgumentType",
		Description:    "Incorrect type of arguments in function call in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrValueParseFailure: {
		Code:           "ValueParseFailure",
		Description:    "Time stamp parse failure in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEvaluatorInvalidArguments: {
		Code:           "EvaluatorInvalidArguments",
		Description:    "Incorrect number of arguments in the function call in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrIntegerOverflow: {
		Code:           "IntegerOverflow",
		Description:    "Int overflow or underflow in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrLikeInvalidInputs: {
		Code:           "LikeInvalidInputs",
		Description:    "Invalid argument given to the LIKE clause in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrCastFailed: {
		Code:           "CastFailed",
		Description:    "Attempt to convert from one data type to another using CAST failed in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidCast: {
		Code:           "InvalidCast",
		Description:    "Attempt to convert from one data type to another using CAST failed in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEvaluatorInvalidTimestampFormatPattern: {
		Code:           "EvaluatorInvalidTimestampFormatPattern",
		Description:    "Time stamp format pattern requires additional fields in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing: {
		Code:           "EvaluatorInvalidTimestampFormatPatternSymbolForParsing",
		Description:    "Time stamp format pattern contains a valid format symbol that cannot be applied to time stamp parsing in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEvaluatorTimestampFormatPatternDuplicateFields: {
		Code:           "EvaluatorTimestampFormatPatternDuplicateFields",
		Description:    "Time stamp format pattern contains multiple format specifiers representing the time stamp field in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch: {
		Code:           "EvaluatorTimestampFormatPatternHourClockAmPmMismatch",
		Description:    "Time stamp format pattern contains a 12-hour hour-of-day symbol without an AM/PM field, or a 24-hour symbol with one, in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEvaluatorUnterminatedTimestampFormatPatternToken: {
		Code:           "EvaluatorUnterminatedTimestampFormatPatternToken",
		Description:    "Time stamp format pattern contains an unterminated token in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEvaluatorInvalidTimestampFormatPatternToken: {
		Code:           "EvaluatorInvalidTimestampFormatPatternToken",
		Description:    "Time stamp format pattern contains an invalid token in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEvaluatorInvalidTimestampFormatPatternSymbol: {
		Code:           "EvaluatorInvalidTimestampFormatPatternSymbol",
		Description:    "Time stamp format pattern contains an invalid symbol in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
ErrEvaluatorBindingDoesNotExist: {
|
||||
Code: "ErrEvaluatorBindingDoesNotExist",
|
||||
Description: "A column name or a path provided does not exist in the SQL expression",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrMissingHeaders: {
|
||||
Code: "MissingHeaders",
|
||||
Description: "Some headers in the query are missing from the file. Check the file and try again.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInvalidColumnIndex: {
|
||||
Code: "InvalidColumnIndex",
|
||||
Description: "The column index is invalid. Please check the service documentation and try again.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInvalidDecompressedSize: {
|
||||
Code: "XMinioInvalidDecompressedSize",
|
||||
Description: "The data provided is unfit for decompression",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrAddUserInvalidArgument: {
|
||||
Code: "XMinioInvalidIAMCredentials",
|
||||
Description: "User is not allowed to be same as admin access key",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
},
|
||||
// Add your error structure here.
|
||||
}
|
||||
|
||||
// toAPIErrorCode - Converts embedded errors. Convenience
// function written to handle all cases where we have known types of
// errors returned by underlying layers.
func toAPIErrorCode(err error) (apiErr APIErrorCode) {
func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
    if err == nil {
        return ErrNone
    }

    // Verify if the underlying error is signature mismatch.
    switch err {
    case errInvalidArgument:
        apiErr = ErrAdminInvalidArgument
    case errNoSuchUser:
        apiErr = ErrAdminNoSuchUser
    case errNoSuchPolicy:
        apiErr = ErrAdminNoSuchPolicy
    case errSignatureMismatch:
        apiErr = ErrSignatureDoesNotMatch
    case errInvalidRange:
        apiErr = ErrInvalidRange
    case errDataTooLarge:
        apiErr = ErrEntityTooLarge
    case errDataTooSmall:
@@ -866,17 +1500,19 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
    case auth.ErrInvalidSecretKeyLength:
        apiErr = ErrAdminInvalidSecretKey
    // SSE errors
    case errInsecureSSERequest:
        apiErr = ErrInsecureSSECustomerRequest
    case errInvalidSSEAlgorithm:
    case errInvalidEncryptionParameters:
        apiErr = ErrInvalidEncryptionParameters
    case crypto.ErrInvalidEncryptionMethod:
        apiErr = ErrInvalidEncryptionMethod
    case crypto.ErrInvalidCustomerAlgorithm:
        apiErr = ErrInvalidSSECustomerAlgorithm
    case errInvalidSSEKey:
    case crypto.ErrInvalidCustomerKey:
        apiErr = ErrInvalidSSECustomerKey
    case errMissingSSEKey:
    case crypto.ErrMissingCustomerKey:
        apiErr = ErrMissingSSECustomerKey
    case errMissingSSEKeyMD5:
    case crypto.ErrMissingCustomerKeyMD5:
        apiErr = ErrMissingSSECustomerKeyMD5
    case errSSEKeyMD5Mismatch:
    case crypto.ErrCustomerKeyMD5Mismatch:
        apiErr = ErrSSECustomerKeyMD5Mismatch
    case errObjectTampered:
        apiErr = ErrObjectTampered
@@ -884,12 +1520,24 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
        apiErr = ErrSSEEncryptedObject
    case errInvalidSSEParameters:
        apiErr = ErrInvalidSSECustomerParameters
    case errSSEKeyMismatch:
    case crypto.ErrInvalidCustomerKey, crypto.ErrSecretKeyMismatch:
        apiErr = ErrAccessDenied // no access without correct key
    case context.Canceled, context.DeadlineExceeded:
    case crypto.ErrIncompatibleEncryptionMethod:
        apiErr = ErrIncompatibleEncryptionMethod
    case errKMSNotConfigured:
        apiErr = ErrKMSNotConfigured
    case crypto.ErrKMSAuthLogin:
        apiErr = ErrKMSAuthFailure
    case errOperationTimedOut, context.Canceled, context.DeadlineExceeded:
        apiErr = ErrOperationTimedOut
    }

    // Compression errors
    switch err {
    case errInvalidDecompressedSize:
        apiErr = ErrInvalidDecompressedSize
    }

    if apiErr != ErrNone {
        // If there was a match in the above switch case.
        return apiErr
@@ -990,8 +1638,68 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
        apiErr = ErrUnsupportedNotification
    case BackendDown:
        apiErr = ErrBackendDown
    case crypto.Error:
        apiErr = ErrObjectTampered
    default:
        apiErr = ErrInternalError
        // Make sure to log the errors which we cannot translate
        // to meaningful S3 API errors. This is added to aid in
        // debugging unexpected/unhandled errors.
        logger.LogIf(ctx, err)
    }

    return apiErr
}
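
// Usage sketch (illustrative; objectAPI and the handler wiring below are
// assumptions, not part of this changeset): handlers funnel every layer error
// through this single conversion point, so the S3 error code is chosen once at
// the boundary and internal error types never leak to clients.
//
//     if err := objectAPI.DeleteObject(ctx, bucket, object); err != nil {
//         writeErrorResponse(ctx, w, errorCodes.ToAPIErr(toAPIErrorCode(ctx, err)), r.URL, guessIsBrowserReq(r))
//         return
//     }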

var noError = APIError{}

// toAPIError - Converts embedded errors. Convenience
// function written to handle all cases where we have known types of
// errors returned by underlying layers.
func toAPIError(ctx context.Context, err error) APIError {
    if err == nil {
        return noError
    }

    var apiErr = errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
    if apiErr.Code == "InternalError" {
        // If we see an internal error try to interpret
        // any underlying errors if possible depending on
        // their internal error types. This code is only
        // useful with gateway implementations.
        switch e := err.(type) {
        case minio.ErrorResponse:
            apiErr = APIError{
                Code:           e.Code,
                Description:    e.Message,
                HTTPStatusCode: e.StatusCode,
            }
        case *googleapi.Error:
            apiErr = APIError{
                Code:           "XGCSInternalError",
                Description:    e.Message,
                HTTPStatusCode: e.Code,
            }
            // GCS may send multiple errors, just pick the first one
            // since S3 only sends one Error XML response.
            if len(e.Errors) >= 1 {
                apiErr.Code = e.Errors[0].Reason
            }
        case storage.AzureStorageServiceError:
            apiErr = APIError{
                Code:           e.Code,
                Description:    e.Message,
                HTTPStatusCode: e.StatusCode,
            }
        case oss.ServiceError:
            apiErr = APIError{
                Code:           e.Code,
                Description:    e.Message,
                HTTPStatusCode: e.StatusCode,
            }
        // Add more Gateway SDKs here if any in future.
        }
    }

    return apiErr
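
// Extension sketch (assumption, not in this changeset): a new gateway SDK
// slots into the same type switch; sdk.Error and its fields are hypothetical.
//
//     case sdk.Error:
//         apiErr = APIError{
//             Code:           e.Code,
//             Description:    e.Message,
//             HTTPStatusCode: e.StatusCode,
//         }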
@@ -999,17 +1707,23 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {

// getAPIError provides API Error for input API error code.
func getAPIError(code APIErrorCode) APIError {
    return errorCodeResponse[code]
    if apiErr, ok := errorCodes[code]; ok {
        return apiErr
    }
    return errorCodes.ToAPIErr(ErrInternalError)
}

// getAPIErrorResponse takes a standard error and resource value and
// provides an encodable populated response value
func getAPIErrorResponse(err APIError, resource, requestid string) APIErrorResponse {
func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID, hostID string) APIErrorResponse {
    reqInfo := logger.GetReqInfo(ctx)
    return APIErrorResponse{
        Code:      err.Code,
        Message:   err.Description,
        Resource:  resource,
        RequestID: requestid,
        HostID:    "3L137",
        Code:       err.Code,
        Message:    err.Description,
        BucketName: reqInfo.BucketName,
        Key:        reqInfo.ObjectName,
        Resource:   resource,
        RequestID:  requestID,
        HostID:     hostID,
    }
}
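
// For reference, a populated APIErrorResponse encodes to the familiar S3 error
// XML, roughly as below; values are illustrative and the tag names assume the
// struct's xml annotations.
//
//     <Error>
//         <Code>NoSuchKey</Code>
//         <Message>The specified key does not exist.</Message>
//         <BucketName>mybucket</BucketName>
//         <Key>myobject</Key>
//         <Resource>/mybucket/myobject</Resource>
//         <RequestId>3L137</RequestId>
//         <HostId>deployment-id</HostId>
//     </Error>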

@@ -17,13 +17,15 @@
package cmd

import (
    "context"
    "errors"
    "testing"

    "github.com/minio/minio/cmd/crypto"
    "github.com/minio/minio/pkg/hash"
)

var toAPIErrorCodeTests = []struct {
var toAPIErrorTests = []struct {
    err     error
    errCode APIErrorCode
}{
@@ -51,12 +53,11 @@ var toAPIErrorCodeTests = []struct {
    {err: errSignatureMismatch, errCode: ErrSignatureDoesNotMatch},

    // SSE-C errors
    {err: errInsecureSSERequest, errCode: ErrInsecureSSECustomerRequest},
    {err: errInvalidSSEAlgorithm, errCode: ErrInvalidSSECustomerAlgorithm},
    {err: errMissingSSEKey, errCode: ErrMissingSSECustomerKey},
    {err: errInvalidSSEKey, errCode: ErrInvalidSSECustomerKey},
    {err: errMissingSSEKeyMD5, errCode: ErrMissingSSECustomerKeyMD5},
    {err: errSSEKeyMD5Mismatch, errCode: ErrSSECustomerKeyMD5Mismatch},
    {err: crypto.ErrInvalidCustomerAlgorithm, errCode: ErrInvalidSSECustomerAlgorithm},
    {err: crypto.ErrMissingCustomerKey, errCode: ErrMissingSSECustomerKey},
    {err: crypto.ErrInvalidCustomerKey, errCode: ErrInvalidSSECustomerKey},
    {err: crypto.ErrMissingCustomerKeyMD5, errCode: ErrMissingSSECustomerKeyMD5},
    {err: crypto.ErrCustomerKeyMD5Mismatch, errCode: ErrSSECustomerKeyMD5Mismatch},
    {err: errObjectTampered, errCode: ErrObjectTampered},

    {err: nil, errCode: ErrNone},
@@ -64,8 +65,9 @@ var toAPIErrorCodeTests = []struct {
}

func TestAPIErrCode(t *testing.T) {
    for i, testCase := range toAPIErrorCodeTests {
        errCode := toAPIErrorCode(testCase.err)
    ctx := context.Background()
    for i, testCase := range toAPIErrorTests {
        errCode := toAPIErrorCode(ctx, testCase.err)
        if errCode != testCase.errCode {
            t.Errorf("Test %d: Expected error code %d, got %d", i+1, testCase.errCode, errCode)
        }

@@ -24,6 +24,8 @@ import (
    "net/http"
    "strconv"
    "time"

    "github.com/minio/minio/cmd/crypto"
)

// Returns a hexadecimal representation of time at the
@@ -34,13 +36,16 @@ func mustGetRequestID(t time.Time) string {

// Write http common headers
func setCommonHeaders(w http.ResponseWriter) {
    w.Header().Set("Server", globalServerUserAgent)
    w.Header().Set("Server", "Minio/"+ReleaseTag)
    // Set `x-amz-bucket-region` only if region is set on the server
    // by default minio uses an empty region.
    if region := globalServerConfig.GetRegion(); region != "" {
        w.Header().Set("X-Amz-Bucket-Region", region)
    }
    w.Header().Set("Accept-Ranges", "bytes")

    // Remove sensitive information
    crypto.RemoveSensitiveHeaders(w.Header())
}

// Encodes the response headers into XML format.
@@ -61,13 +66,10 @@ func encodeResponseJSON(response interface{}) []byte {
}

// Write object header
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, contentRange *httpRange) {
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec) (err error) {
    // set common headers
    setCommonHeaders(w)

    // Set content length.
    w.Header().Set("Content-Length", strconv.FormatInt(objInfo.Size, 10))

    // Set last modified time.
    lastModified := objInfo.ModTime.UTC().Format(http.TimeFormat)
    w.Header().Set("Last-Modified", lastModified)
@@ -95,11 +97,34 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, contentRange *h
        w.Header().Set(k, v)
    }

    // for providing ranged content
    if contentRange != nil && contentRange.offsetBegin > -1 {
        // Override content-length
        w.Header().Set("Content-Length", strconv.FormatInt(contentRange.getLength(), 10))
        w.Header().Set("Content-Range", contentRange.String())
        w.WriteHeader(http.StatusPartialContent)
    var totalObjectSize int64
    switch {
    case crypto.IsEncrypted(objInfo.UserDefined):
        totalObjectSize, err = objInfo.DecryptedSize()
        if err != nil {
            return err
        }
    case objInfo.IsCompressed():
        totalObjectSize = objInfo.GetActualSize()
        if totalObjectSize < 0 {
            return errInvalidDecompressedSize
        }
    default:
        totalObjectSize = objInfo.Size
    }

    // for providing ranged content
    start, rangeLen, err := rs.GetOffsetLength(totalObjectSize)
    if err != nil {
        return err
    }

    // Set content length.
    w.Header().Set("Content-Length", strconv.FormatInt(rangeLen, 10))
    if rs != nil {
        contentRange := fmt.Sprintf("bytes %d-%d/%d", start, start+rangeLen-1, totalObjectSize)
        w.Header().Set("Content-Range", contentRange)
    }

    return nil
}
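
// Worked example for the arithmetic above (illustrative): resolving the
// request header "Range: bytes=100-199" against a 1000-byte object would give
// start=100 and rangeLen=100 from GetOffsetLength, producing:
//
//     Content-Length: 100
//     Content-Range:  bytes 100-199/1000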

@@ -22,15 +22,22 @@ import (
)

// Parse bucket url queries
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType string) {
    prefix = values.Get("prefix")
    marker = values.Get("marker")
    delimiter = values.Get("delimiter")
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType string, errCode APIErrorCode) {
    errCode = ErrNone

    if values.Get("max-keys") != "" {
        maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
        var err error
        if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
            errCode = ErrInvalidMaxKeys
            return
        }
    } else {
        maxkeys = maxObjectList
    }

    prefix = values.Get("prefix")
    marker = values.Get("marker")
    delimiter = values.Get("delimiter")
    encodingType = values.Get("encoding-type")
    return
}
@@ -47,44 +54,69 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
        }
    }

    if values.Get("max-keys") != "" {
        var err error
        if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
            errCode = ErrInvalidMaxKeys
            return
        }
    } else {
        maxkeys = maxObjectList
    }

    prefix = values.Get("prefix")
    token = values.Get("continuation-token")
    startAfter = values.Get("start-after")
    delimiter = values.Get("delimiter")
    if values.Get("max-keys") != "" {
        maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
    } else {
        maxkeys = maxObjectList
    }
    fetchOwner = values.Get("fetch-owner") == "true"
    encodingType = values.Get("encoding-type")
    return
}

// Parse bucket url queries for ?uploads
func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int, encodingType string) {
func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int, encodingType string, errCode APIErrorCode) {
    errCode = ErrNone

    if values.Get("max-uploads") != "" {
        var err error
        if maxUploads, err = strconv.Atoi(values.Get("max-uploads")); err != nil {
            errCode = ErrInvalidMaxUploads
            return
        }
    } else {
        maxUploads = maxUploadsList
    }

    prefix = values.Get("prefix")
    keyMarker = values.Get("key-marker")
    uploadIDMarker = values.Get("upload-id-marker")
    delimiter = values.Get("delimiter")
    if values.Get("max-uploads") != "" {
        maxUploads, _ = strconv.Atoi(values.Get("max-uploads"))
    } else {
        maxUploads = maxUploadsList
    }
    encodingType = values.Get("encoding-type")
    return
}

// Parse object url queries
func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string) {
    uploadID = values.Get("uploadId")
    partNumberMarker, _ = strconv.Atoi(values.Get("part-number-marker"))
func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string, errCode APIErrorCode) {
    var err error
    errCode = ErrNone

    if values.Get("max-parts") != "" {
        maxParts, _ = strconv.Atoi(values.Get("max-parts"))
        if maxParts, err = strconv.Atoi(values.Get("max-parts")); err != nil {
            errCode = ErrInvalidMaxParts
            return
        }
    } else {
        maxParts = maxPartsList
    }

    if values.Get("part-number-marker") != "" {
        if partNumberMarker, err = strconv.Atoi(values.Get("part-number-marker")); err != nil {
            errCode = ErrInvalidPartNumberMarker
            return
        }
    }

    uploadID = values.Get("uploadId")
    encodingType = values.Get("encoding-type")
    return
}
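
// Usage sketch for the parsers above (hypothetical handler wiring): callers
// must check errCode before trusting any other return value.
//
//     uploadID, partNumberMarker, maxParts, encodingType, errCode := getObjectResources(r.URL.Query())
//     if errCode != ErrNone {
//         writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
//         return
//     }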

@@ -156,7 +156,10 @@ func TestListObjectsV1Resources(t *testing.T) {
    }

    for i, testCase := range testCases {
        prefix, marker, delimiter, maxKeys, encodingType := getListObjectsV1Args(testCase.values)
        prefix, marker, delimiter, maxKeys, encodingType, argsErr := getListObjectsV1Args(testCase.values)
        if argsErr != ErrNone {
            t.Errorf("Test %d: argument parsing failed, got %v", i+1, argsErr)
        }
        if prefix != testCase.prefix {
            t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.prefix, prefix)
        }
@@ -198,7 +201,10 @@ func TestGetObjectsResources(t *testing.T) {
    }

    for i, testCase := range testCases {
        uploadID, partNumberMarker, maxParts, encodingType := getObjectResources(testCase.values)
        uploadID, partNumberMarker, maxParts, encodingType, argsErr := getObjectResources(testCase.values)
        if argsErr != ErrNone {
            t.Errorf("Test %d: argument parsing failed, got %v", i+1, argsErr)
        }
        if uploadID != testCase.uploadID {
            t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.uploadID, uploadID)
        }

@@ -16,7 +16,10 @@

package cmd

import "net/http"
import (
    "context"
    "net/http"
)

// Represents additional fields necessary for ErrPartTooSmall S3 error.
type completeMultipartAPIError struct {
@@ -40,11 +43,12 @@ type completeMultipartAPIError struct {
// XML doesn't carry the additional fields required to send this
// error. So we construct a new type which lies well within the scope
// of this function.
func writePartSmallErrorResponse(w http.ResponseWriter, r *http.Request, err PartTooSmall) {

    apiError := getAPIError(toAPIErrorCode(err))
func writePartSmallErrorResponse(ctx context.Context, w http.ResponseWriter, r *http.Request, err PartTooSmall) {
    apiError := toAPIError(ctx, err)
    // Generate complete multipart error response.
    errorResponse := getAPIErrorResponse(apiError, r.URL.Path, w.Header().Get(responseRequestIDKey))
    errorResponse := getAPIErrorResponse(ctx, apiError, r.URL.Path,
        w.Header().Get(responseRequestIDKey),
        w.Header().Get(responseDeploymentIDKey))
    cmpErrResp := completeMultipartAPIError{err.PartSize, int64(5242880), err.PartNumber, err.PartETag, errorResponse}
    encodedErrorResponse := encodeResponse(cmpErrResp)

@@ -17,6 +17,7 @@
package cmd

import (
    "context"
    "encoding/xml"
    "net/http"
    "net/url"
@@ -24,6 +25,7 @@ import (
    "strings"
    "time"

    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/handlers"
)

@@ -568,49 +570,57 @@ func writeSuccessResponseHeadersOnly(w http.ResponseWriter) {
}

// writeErrorResponse writes error headers
func writeErrorResponse(w http.ResponseWriter, errorCode APIErrorCode, reqURL *url.URL) {
    switch errorCode {
    case ErrSlowDown, ErrServerNotInitialized, ErrReadQuorum, ErrWriteQuorum:
func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL, browser bool) {
    switch err.Code {
    case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
        // Set retry-after header to indicate user-agents to retry request after 120secs.
        // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
        w.Header().Set("Retry-After", "120")
    case "AccessDenied":
        // If the request is from a browser and the browser
        // is enabled, we need to redirect.
        if browser && globalIsBrowserEnabled {
            w.Header().Set("Location", minioReservedBucketPath+reqURL.Path)
            w.WriteHeader(http.StatusTemporaryRedirect)
            return
        }
    }
    apiError := getAPIError(errorCode)

    // Generate error response.
    errorResponse := getAPIErrorResponse(apiError, reqURL.Path, w.Header().Get(responseRequestIDKey))
    errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path, w.Header().Get(responseRequestIDKey), w.Header().Get(responseDeploymentIDKey))
    encodedErrorResponse := encodeResponse(errorResponse)
    writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML)
    writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
}
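
// Behavior sketch (illustrative): a quorum or initialization failure is
// answered with the error body plus a back-off hint, assuming ErrSlowDown maps
// to HTTP 503 in the error table.
//
//     writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSlowDown), r.URL, guessIsBrowserReq(r))
//     // -> 503 Service Unavailable, "Retry-After: 120", XML error body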

func writeErrorResponseHeadersOnly(w http.ResponseWriter, errorCode APIErrorCode) {
    apiError := getAPIError(errorCode)
    writeResponse(w, apiError.HTTPStatusCode, nil, mimeNone)
func writeErrorResponseHeadersOnly(w http.ResponseWriter, err APIError) {
    writeResponse(w, err.HTTPStatusCode, nil, mimeNone)
}

// writeErrorResponseJSON - writes error response in JSON format;
// useful for admin APIs.
func writeErrorResponseJSON(w http.ResponseWriter, errorCode APIErrorCode, reqURL *url.URL) {
    apiError := getAPIError(errorCode)
func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
    // Generate error response.
    errorResponse := getAPIErrorResponse(apiError, reqURL.Path, w.Header().Get(responseRequestIDKey))
    errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path, w.Header().Get(responseRequestIDKey), w.Header().Get(responseDeploymentIDKey))
    encodedErrorResponse := encodeResponseJSON(errorResponse)
    writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeJSON)
    writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}

// writeCustomErrorResponseJSON - similar to writeErrorResponseJSON,
// but accepts the error message directly (this allows messages to be
// dynamically generated.)
func writeCustomErrorResponseJSON(w http.ResponseWriter, errorCode APIErrorCode,
func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError,
    errBody string, reqURL *url.URL) {

    apiError := getAPIError(errorCode)
    reqInfo := logger.GetReqInfo(ctx)
    errorResponse := APIErrorResponse{
        Code:      apiError.Code,
        Message:   errBody,
        Resource:  reqURL.Path,
        RequestID: "3L137",
        HostID:    "3L137",
        Code:       err.Code,
        Message:    errBody,
        Resource:   reqURL.Path,
        BucketName: reqInfo.BucketName,
        Key:        reqInfo.ObjectName,
        RequestID:  w.Header().Get(responseRequestIDKey),
        HostID:     w.Header().Get(responseDeploymentIDKey),
    }
    encodedErrorResponse := encodeResponseJSON(errorResponse)
    writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeJSON)
    writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}

@@ -20,29 +20,25 @@ import (
    "net/http"

    "github.com/gorilla/mux"
    "github.com/minio/minio/cmd/logger"
)

// objectAPIHandlers implements and provides http handlers for S3 API.
type objectAPIHandlers struct {
    ObjectAPI func() ObjectLayer
    CacheAPI  func() CacheObjectLayer
    // Returns true if handlers should interpret encryption.
    EncryptionEnabled func() bool
}

// registerAPIRouter - registers S3 compatible APIs.
func registerAPIRouter(router *mux.Router) {
    var err error
    var cacheConfig = globalServerConfig.GetCacheConfig()
    if len(cacheConfig.Drives) > 0 {
        // initialize the new disk cache objects.
        globalCacheObjectAPI, err = newServerCacheObjects(cacheConfig)
        logger.FatalIf(err, "Unable to initialize disk caching")
    }

func registerAPIRouter(router *mux.Router, encryptionEnabled bool) {
    // Initialize API.
    api := objectAPIHandlers{
        ObjectAPI: newObjectLayerFn,
        CacheAPI:  newCacheObjectsFn,
        EncryptionEnabled: func() bool {
            return encryptionEnabled
        },
    }

    // API Router
@@ -71,6 +67,10 @@ func registerAPIRouter(router *mux.Router) {
    bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.AbortMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}")
    // GetObjectACL - this is a dummy call.
    bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectACLHandler)).Queries("acl", "")
    // GetObjectTagging - this is a dummy call.
    bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectTaggingHandler)).Queries("tagging", "")
    // SelectObjectContent
    bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.SelectObjectContentHandler)).Queries("select", "").Queries("select-type", "2")
    // GetObject
    bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectHandler))
    // CopyObject
@@ -86,8 +86,31 @@ func registerAPIRouter(router *mux.Router) {
    // GetBucketPolicy
    bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketPolicyHandler)).Queries("policy", "")

    // Dummy Bucket Calls
    // GetBucketACL -- this is a dummy call.
    bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketACLHandler)).Queries("acl", "")
    // GetBucketCors - this is a dummy call.
    bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketCorsHandler)).Queries("cors", "")
    // GetBucketWebsiteHandler - this is a dummy call.
    bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketWebsiteHandler)).Queries("website", "")
    // GetBucketVersioningHandler - this is a dummy call.
    bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketVersioningHandler)).Queries("versioning", "")
    // GetBucketAccelerateHandler - this is a dummy call.
    bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketAccelerateHandler)).Queries("accelerate", "")
    // GetBucketRequestPaymentHandler - this is a dummy call.
    bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketRequestPaymentHandler)).Queries("requestPayment", "")
    // GetBucketLoggingHandler - this is a dummy call.
    bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketLoggingHandler)).Queries("logging", "")
    // GetBucketLifecycleHandler - this is a dummy call.
    bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketLifecycleHandler)).Queries("lifecycle", "")
    // GetBucketReplicationHandler - this is a dummy call.
    bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketReplicationHandler)).Queries("replication", "")
    // GetBucketTaggingHandler - this is a dummy call.
    bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketTaggingHandler)).Queries("tagging", "")
    // DeleteBucketWebsiteHandler
    bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketWebsiteHandler)).Queries("website", "")
    // DeleteBucketTaggingHandler
    bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketTaggingHandler)).Queries("tagging", "")

    // GetBucketNotification
    bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketNotificationHandler)).Queries("notification", "")

@@ -19,6 +19,7 @@ package cmd
import (
    "bytes"
    "context"
    "crypto/subtle"
    "encoding/base64"
    "encoding/hex"
    "errors"
@@ -27,8 +28,11 @@ import (
    "net/http"
    "strings"

    jwtgo "github.com/dgrijalva/jwt-go"
    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/auth"
    "github.com/minio/minio/pkg/hash"
    "github.com/minio/minio/pkg/iam/policy"
    "github.com/minio/minio/pkg/policy"
)

@@ -86,6 +90,7 @@ const (
    authTypeSigned
    authTypeSignedV2
    authTypeJWT
    authTypeSTS
)

// Get request authentication type.
@@ -104,6 +109,8 @@ func getRequestAuthType(r *http.Request) authType {
        return authTypeJWT
    } else if isRequestPostPolicySignatureV4(r) {
        return authTypePostPolicy
    } else if _, ok := r.URL.Query()["Action"]; ok {
        return authTypeSTS
    } else if _, ok := r.Header["Authorization"]; !ok {
        return authTypeAnonymous
    }
@@ -112,43 +119,140 @@ func getRequestAuthType(r *http.Request) authType {

// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
// It does not accept presigned or JWT or anonymous requests.
func checkAdminRequestAuthType(r *http.Request, region string) APIErrorCode {
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, region string) APIErrorCode {
    s3Err := ErrAccessDenied
    if _, ok := r.Header["X-Amz-Content-Sha256"]; ok && getRequestAuthType(r) == authTypeSigned && !skipContentSha256Cksum(r) { // we only support V4 (no presign) with auth. body
        s3Err = isReqAuthenticated(r, region)
    if _, ok := r.Header["X-Amz-Content-Sha256"]; ok &&
        getRequestAuthType(r) == authTypeSigned && !skipContentSha256Cksum(r) {
        // We only support admin credentials to access admin APIs.

        var owner bool
        _, owner, s3Err = getReqAccessKeyV4(r, region)
        if s3Err != ErrNone {
            return s3Err
        }

        if !owner {
            return ErrAccessDenied
        }

        // we only support V4 (no presign) with auth body
        s3Err = isReqAuthenticated(ctx, r, region)
    }
    if s3Err != ErrNone {
        reqInfo := (&logger.ReqInfo{}).AppendTags("requestHeaders", dumpRequest(r))
        ctx := logger.SetReqInfo(context.Background(), reqInfo)
        ctx := logger.SetReqInfo(ctx, reqInfo)
        logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description))
    }
    return s3Err
}

func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) APIErrorCode {
    isOwner := true
    accountName := globalServerConfig.GetCredential().AccessKey
// Fetch the security token set by the client.
func getSessionToken(r *http.Request) (token string) {
    token = r.Header.Get("X-Amz-Security-Token")
    if token != "" {
        return token
    }
    return r.URL.Query().Get("X-Amz-Security-Token")
}

// Fetch claims in the security token returned by the client, doesn't return
// errors - upon errors the returned claims map will be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
    claims, _ := getClaimsFromToken(r)
    return claims
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
    claims := make(map[string]interface{})
    token := getSessionToken(r)
    if token == "" {
        return claims, nil
    }
    stsTokenCallback := func(jwtToken *jwtgo.Token) (interface{}, error) {
        // JWT token for x-amz-security-token is signed with admin
        // secret key, temporary credentials become invalid if
        // server admin credentials change. This is done to ensure
        // that clients cannot decode the token using the temp
        // secret keys and generate an entirely new claim by essentially
        // hijacking the policies. We need to make sure that this is
        // based on an admin credential such that the token cannot be decoded
        // on the client side and is treated like an opaque value.
        return []byte(globalServerConfig.GetCredential().SecretKey), nil
    }
    p := &jwtgo.Parser{
        ValidMethods: []string{
            jwtgo.SigningMethodHS256.Alg(),
            jwtgo.SigningMethodHS512.Alg(),
        },
    }
    jtoken, err := p.ParseWithClaims(token, jwtgo.MapClaims(claims), stsTokenCallback)
    if err != nil {
        return nil, err
    }
    if !jtoken.Valid {
        return nil, errAuthentication
    }
    v, ok := claims["accessKey"]
    if !ok {
        return nil, errInvalidAccessKeyID
    }
    if _, ok = v.(string); !ok {
        return nil, errInvalidAccessKeyID
    }
    return claims, nil
}
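
// Complementary sketch (assumption, not in this changeset): the issuing side
// would mint a token that this parser accepts by signing the claims with the
// same admin secret via dgrijalva/jwt-go; tmpCred and expiry are hypothetical.
//
//     claims := jwtgo.MapClaims{"accessKey": tmpCred.AccessKey, "exp": expiry.Unix()}
//     token := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, claims)
//     signed, err := token.SignedString([]byte(globalServerConfig.GetCredential().SecretKey))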

// Fetch claims in the security token returned by the client and validate the token.
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
    token := getSessionToken(r)
    if token != "" && cred.AccessKey == "" {
        return nil, ErrNoAccessKey
    }
    if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
        return nil, ErrInvalidToken
    }
    claims, err := getClaimsFromToken(r)
    if err != nil {
        return nil, toAPIErrorCode(context.Background(), err)
    }
    return claims, ErrNone
}
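
// Note on the comparison above: subtle.ConstantTimeCompare runs in time
// independent of where the inputs differ, so an attacker cannot recover a
// valid session token byte-by-byte from response timing.
//
//     token == cred.SessionToken // timing leak: exits at first differing byte
//     subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) == 1 // constant time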

// Check request auth type verifies the incoming http request
// - validates the request signature
// - validates the policy action: if anonymous, tests bucket policies if any;
//   for authenticated requests, validates IAM policies.
// returns APIErrorCode if any to be replied to the client.
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
    var cred auth.Credentials
    var owner bool
    switch getRequestAuthType(r) {
    case authTypeUnknown:
    case authTypeUnknown, authTypeStreamingSigned:
        return ErrAccessDenied
    case authTypePresignedV2, authTypeSignedV2:
        if errorCode := isReqAuthenticatedV2(r); errorCode != ErrNone {
            return errorCode
        if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
            return s3Err
        }
        cred, owner, s3Err = getReqAccessKeyV2(r)
    case authTypeSigned, authTypePresigned:
        region := globalServerConfig.GetRegion()
        switch action {
        case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
            region = ""
        }

        if errorCode := isReqAuthenticated(r, region); errorCode != ErrNone {
            return errorCode
        if s3Err = isReqAuthenticated(ctx, r, region); s3Err != ErrNone {
            return s3Err
        }
    default:
        isOwner = false
        accountName = ""
        cred, owner, s3Err = getReqAccessKeyV4(r, region)
    }
    if s3Err != ErrNone {
        return s3Err
    }

    claims, s3Err := checkClaimsFromToken(r, cred)
    if s3Err != ErrNone {
        return s3Err
    }

    // LocationConstraint is valid only for CreateBucketAction.
@@ -174,17 +278,31 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
        r.Body = ioutil.NopCloser(bytes.NewReader(payload))
    }

    if globalPolicySys.IsAllowed(policy.Args{
        AccountName: accountName,
        Action:      action,
    if cred.AccessKey == "" {
        if globalPolicySys.IsAllowed(policy.Args{
            AccountName:     cred.AccessKey,
            Action:          action,
            BucketName:      bucketName,
            ConditionValues: getConditionValues(r, locationConstraint, ""),
            IsOwner:         false,
            ObjectName:      objectName,
        }) {
            return ErrNone
        }
        return ErrAccessDenied
    }

    if globalIAMSys.IsAllowed(iampolicy.Args{
        AccountName: cred.AccessKey,
        Action:      iampolicy.Action(action),
        BucketName:  bucketName,
        ConditionValues: getConditionValues(r, locationConstraint),
        IsOwner:         isOwner,
        ConditionValues: getConditionValues(r, "", cred.AccessKey),
        ObjectName:      objectName,
        IsOwner:         owner,
        Claims:          claims,
    }) {
        return ErrNone
    }

    return ErrAccessDenied
}

@@ -209,7 +327,7 @@ func reqSignatureV4Verify(r *http.Request, region string) (s3Error APIErrorCode)
}

// Verify if request has valid AWS Signature Version '4'.
func isReqAuthenticated(r *http.Request, region string) (s3Error APIErrorCode) {
func isReqAuthenticated(ctx context.Context, r *http.Request, region string) (s3Error APIErrorCode) {
    if errCode := reqSignatureV4Verify(r, region); errCode != ErrNone {
        return errCode
    }
@@ -244,9 +362,9 @@ func isReqAuthenticated(r *http.Request, region string) (s3Error APIErrorCode) {

    // Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
    // The verification happens implicitly during reading.
    reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256))
    reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256), -1)
    if err != nil {
        return toAPIErrorCode(err)
        return toAPIErrorCode(ctx, err)
    }
    r.Body = ioutil.NopCloser(reader)
    return ErrNone
@@ -288,12 +406,67 @@ func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
        return
    } else if aType == authTypeJWT {
        // Validate Authorization header if it is valid for JWT request.
        if !isHTTPRequestValid(r) {
        if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
            w.WriteHeader(http.StatusUnauthorized)
            return
        }
        a.handler.ServeHTTP(w, r)
        return
    } else if aType == authTypeSTS {
        a.handler.ServeHTTP(w, r)
        return
    }
    writeErrorResponse(w, ErrSignatureVersionNotSupported, r.URL)
    writeErrorResponse(context.Background(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
}

// isPutAllowed - check if PUT operation is allowed on the resource, this
// call verifies bucket policies and IAM policies, supports multi user
// checks etc.
func isPutAllowed(atype authType, bucketName, objectName string, r *http.Request) (s3Err APIErrorCode) {
    var cred auth.Credentials
    var owner bool
    switch atype {
    case authTypeUnknown:
        return ErrAccessDenied
    case authTypeSignedV2, authTypePresignedV2:
        cred, owner, s3Err = getReqAccessKeyV2(r)
    case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
        region := globalServerConfig.GetRegion()
        cred, owner, s3Err = getReqAccessKeyV4(r, region)
    }
    if s3Err != ErrNone {
        return s3Err
    }

    claims, s3Err := checkClaimsFromToken(r, cred)
    if s3Err != ErrNone {
        return s3Err
    }

    if cred.AccessKey == "" {
        if globalPolicySys.IsAllowed(policy.Args{
            AccountName:     cred.AccessKey,
            Action:          policy.PutObjectAction,
            BucketName:      bucketName,
            ConditionValues: getConditionValues(r, "", ""),
            IsOwner:         false,
            ObjectName:      objectName,
        }) {
            return ErrNone
        }
        return ErrAccessDenied
    }

    if globalIAMSys.IsAllowed(iampolicy.Args{
        AccountName:     cred.AccessKey,
        Action:          policy.PutObjectAction,
        BucketName:      bucketName,
        ConditionValues: getConditionValues(r, "", cred.AccessKey),
        ObjectName:      objectName,
        IsOwner:         owner,
        Claims:          claims,
    }) {
        return ErrNone
    }
    return ErrAccessDenied
}

@@ -18,6 +18,7 @@ package cmd

import (
    "bytes"
    "context"
    "io"
    "io/ioutil"
    "net/http"
@@ -344,11 +345,14 @@ func mustNewSignedBadMD5Request(method string, urlStr string, contentLength int6

// Tests the isReqAuthenticated function, checks the S3 errors it replies.
func TestIsReqAuthenticated(t *testing.T) {
    path, err := newTestConfig(globalMinioDefaultRegion)
    objLayer, fsDir, err := prepareFS()
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(fsDir)
    if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
        t.Fatalf("unable to initialize config file, %s", err)
    }
    defer os.RemoveAll(path)

    creds, err := auth.CreateCredentials("myuser", "mypassword")
    if err != nil {
@@ -374,21 +378,26 @@ func TestIsReqAuthenticated(t *testing.T) {
        {mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
    }

    ctx := context.Background()
    // Validates all testcases.
    for i, testCase := range testCases {
        if s3Error := isReqAuthenticated(testCase.req, globalServerConfig.GetRegion()); s3Error != testCase.s3Error {
            if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(err) != testCase.s3Error {
                t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %d)", i, testCase.s3Error, s3Error, toAPIErrorCode(err))
        if s3Error := isReqAuthenticated(ctx, testCase.req, globalServerConfig.GetRegion()); s3Error != testCase.s3Error {
            if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
                t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
            }
        }
    }
}
func TestCheckAdminRequestAuthType(t *testing.T) {
    path, err := newTestConfig(globalMinioDefaultRegion)
    objLayer, fsDir, err := prepareFS()
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(fsDir)

    if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
        t.Fatalf("unable to initialize config file, %s", err)
    }
    defer os.RemoveAll(path)

    creds, err := auth.CreateCredentials("myuser", "mypassword")
    if err != nil {
@@ -406,8 +415,9 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
        {Request: mustNewPresignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
        {Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
    }
    ctx := context.Background()
    for i, testCase := range testCases {
        if s3Error := checkAdminRequestAuthType(testCase.Request, globalServerConfig.GetRegion()); s3Error != testCase.ErrCode {
        if s3Error := checkAdminRequestAuthType(ctx, testCase.Request, globalServerConfig.GetRegion()); s3Error != testCase.ErrCode {
            t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
        }
    }

@@ -22,7 +22,6 @@ import (
    "io/ioutil"
    "math"
    "math/rand"
    "os"
    "strconv"
    "testing"

@@ -50,7 +49,6 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
    textData := generateBytesData(objSize)
    // generate md5sum for the generated data.
    // md5sum of the data to be written is required as input for PutObject.
    metadata := make(map[string]string)

    md5hex := getMD5Hash(textData)
    sha256hex := ""
@@ -62,7 +60,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
    for i := 0; i < b.N; i++ {
        // insert the object.
        objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
            mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
            mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
        if err != nil {
            b.Fatal(err)
        }
@@ -97,13 +95,11 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
    textData := generateBytesData(objSize)
    // generate md5sum for the generated data.
    // md5sum of the data to be written is required as input for NewMultipartUpload.
    metadata := make(map[string]string)
    uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, metadata)
    uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, ObjectOptions{})
    if err != nil {
        b.Fatal(err)
    }

    md5hex := getMD5Hash(textData)
    sha256hex := ""

    var textPartData []byte
@@ -120,10 +116,10 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
        } else {
            textPartData = textData[j*partSize:]
        }
        md5hex = getMD5Hash([]byte(textPartData))
        md5hex := getMD5Hash([]byte(textPartData))
        var partInfo PartInfo
        partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
            mustGetHashReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex))
            mustGetPutObjReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
        if err != nil {
            b.Fatal(err)
        }
@@ -138,12 +134,6 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {

// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
    rootPath, err := newTestConfig(globalMinioDefaultRegion)
    if err != nil {
        b.Fatalf("Unable to initialize config. %s", err)
    }
    defer os.RemoveAll(rootPath)

    // create a temp XL/FS backend.
    objLayer, disks, err := prepareBenchmarkBackend(instanceType)
    if err != nil {
@@ -151,18 +141,13 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
    }
    // cleaning up the backend by removing all the directories and files created on function return.
    defer removeRoots(disks)

    // uses *testing.B and the object Layer to run the benchmark.
    runPutObjectPartBenchmark(b, objLayer, objSize)
}

// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
    rootPath, err := newTestConfig(globalMinioDefaultRegion)
    if err != nil {
        b.Fatalf("Unable to initialize config. %s", err)
    }
    defer os.RemoveAll(rootPath)

    // create a temp XL/FS backend.
    objLayer, disks, err := prepareBenchmarkBackend(instanceType)
    if err != nil {
@@ -170,18 +155,13 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
    }
    // cleaning up the backend by removing all the directories and files created on function return.
    defer removeRoots(disks)

    // uses *testing.B and the object Layer to run the benchmark.
    runPutObjectBenchmark(b, objLayer, objSize)
}

// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
    rootPath, err := newTestConfig(globalMinioDefaultRegion)
    if err != nil {
        b.Fatalf("Unable to initialize config. %s", err)
    }
    defer os.RemoveAll(rootPath)

    // create a temp XL/FS backend.
    objLayer, disks, err := prepareBenchmarkBackend(instanceType)
    if err != nil {
@@ -189,6 +169,7 @@ func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int)
    }
    // cleaning up the backend by removing all the directories and files created on function return.
    defer removeRoots(disks)

    // uses *testing.B and the object Layer to run the benchmark.
    runPutObjectBenchmarkParallel(b, objLayer, objSize)
}
@@ -196,16 +177,10 @@ func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int)
// Benchmark utility functions for ObjectLayer.GetObject().
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
    rootPath, err := newTestConfig(globalMinioDefaultRegion)
    if err != nil {
        b.Fatalf("Unable to initialize config. %s", err)
    }
    defer os.RemoveAll(rootPath)

    // obtains random bucket name.
    bucket := getRandomBucketName()
    // create bucket.
    err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
    err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
    if err != nil {
        b.Fatal(err)
    }
@@ -215,7 +190,6 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
    // generate etag for the generated data.
    // etag of the data to be written is required as input for PutObject.
    // PutObject is the function which writes the data onto the FS/XL backend.
    metadata := make(map[string]string)

    // get text data generated for number of bytes equal to object size.
    md5hex := getMD5Hash(textData)
@@ -225,7 +199,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
        // insert the object.
        var objInfo ObjectInfo
        objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
            mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
            mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
        if err != nil {
            b.Fatal(err)
        }
@@ -240,7 +214,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        var buffer = new(bytes.Buffer)
        err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "")
        err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "", ObjectOptions{})
        if err != nil {
            b.Error(err)
        }
@@ -255,10 +229,8 @@ func getRandomByte() []byte {
    const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    // seeding the random number generator.
    rand.Seed(UTCNow().UnixNano())
    var b byte
    // pick a character randomly.
    b = letterBytes[rand.Intn(len(letterBytes))]
    return []byte{b}
    return []byte{letterBytes[rand.Intn(len(letterBytes))]}
}

// picks a random byte and repeats it to size bytes.
@@ -269,12 +241,6 @@ func generateBytesData(size int) []byte {

// creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
    rootPath, err := newTestConfig(globalMinioDefaultRegion)
    if err != nil {
        b.Fatalf("Unable to initialize config. %s", err)
    }
    defer os.RemoveAll(rootPath)

    // create a temp XL/FS backend.
    objLayer, disks, err := prepareBenchmarkBackend(instanceType)
    if err != nil {
@@ -282,18 +248,13 @@ func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
    }
    // cleaning up the backend by removing all the directories and files created.
    defer removeRoots(disks)

    // uses *testing.B and the object Layer to run the benchmark.
    runGetObjectBenchmark(b, objLayer, objSize)
}

// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject().
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
    rootPath, err := newTestConfig(globalMinioDefaultRegion)
    if err != nil {
        b.Fatalf("Unable to initialize config. %s", err)
    }
    defer os.RemoveAll(rootPath)

    // create a temp XL/FS backend.
    objLayer, disks, err := prepareBenchmarkBackend(instanceType)
    if err != nil {
@@ -301,6 +262,7 @@ func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int)
    }
    // cleaning up the backend by removing all the directories and files created.
    defer removeRoots(disks)

    // uses *testing.B and the object Layer to run the benchmark.
    runGetObjectBenchmarkParallel(b, objLayer, objSize)
}
@@ -308,16 +270,10 @@ func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int)
// Parallel benchmark utility functions for ObjectLayer.PutObject().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
    rootPath, err := newTestConfig(globalMinioDefaultRegion)
    if err != nil {
        b.Fatalf("Unable to initialize config. %s", err)
    }
    defer os.RemoveAll(rootPath)

    // obtains random bucket name.
    bucket := getRandomBucketName()
    // create bucket.
    err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
    err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
    if err != nil {
        b.Fatal(err)
    }
@@ -326,7 +282,6 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
    textData := generateBytesData(objSize)
    // generate md5sum for the generated data.
    // md5sum of the data to be written is required as input for PutObject.
    metadata := make(map[string]string)

    md5hex := getMD5Hash([]byte(textData))
    sha256hex := ""
@@ -341,7 +296,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
    for pb.Next() {
        // insert the object.
        objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
            mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
            mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
        if err != nil {
            b.Fatal(err)
        }
@@ -359,16 +314,10 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// Parallel benchmark utility functions for ObjectLayer.GetObject().
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
    rootPath, err := newTestConfig(globalMinioDefaultRegion)
    if err != nil {
        b.Fatalf("Unable to initialize config. %s", err)
    }
    defer os.RemoveAll(rootPath)

    // obtains random bucket name.
    bucket := getRandomBucketName()
    // create bucket.
    err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
    err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
    if err != nil {
        b.Fatal(err)
    }
@@ -378,7 +327,6 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
    // generate md5sum for the generated data.
    // md5sum of the data to be written is required as input for PutObject.
    // PutObject is the function which writes the data onto the FS/XL backend.
    metadata := make(map[string]string)

    md5hex := getMD5Hash([]byte(textData))
    sha256hex := ""
@@ -387,7 +335,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
    // insert the object.
    var objInfo ObjectInfo
    objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
        mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
        mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
    if err != nil {
        b.Fatal(err)
    }
@@ -403,7 +351,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
    b.RunParallel(func(pb *testing.PB) {
        i := 0
        for pb.Next() {
            err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "")
            err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "", ObjectOptions{})
            if err != nil {
                b.Error(err)
            }

||||
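Note: the hunks above only swap mustGetHashReader/metadata for mustGetPutObjReader/ObjectOptions{}; the benchmark scaffolding itself is unchanged. For reference, a minimal self-contained sketch of the *testing.B parallel idiom these helpers plug into (names here are illustrative, not from the MinIO tree):

```go
package bench_test

import "testing"

// BenchmarkParallelPattern: RunParallel fans the benchmark loop out over
// GOMAXPROCS goroutines; each goroutine pulls iterations via pb.Next().
func BenchmarkParallelPattern(b *testing.B) {
	b.SetBytes(1024) // report throughput, assuming 1 KiB of work per op
	b.ReportAllocs()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		buf := make([]byte, 1024) // per-goroutine state, like the textData buffer above
		for pb.Next() {
			_ = buf // the real benchmarks call PutObject/GetObject here
		}
	})
}
```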
172 cmd/bitrot-streaming.go Normal file
@@ -0,0 +1,172 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
"bytes"
"context"
"encoding/hex"
"hash"
"io"

"github.com/minio/minio/cmd/logger"
)

// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow *io.PipeWriter
h hash.Hash
shardSize int64
canClose chan struct{} // Needed to avoid race explained in Close() call.

// Following two fields are used only to make sure that Write(p) is called such that
// len(p) is always the block size except the last block, i.e. to prevent programmer errors.
currentBlockIdx int
verifyTillIdx int
}

func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
if b.currentBlockIdx < b.verifyTillIdx && int64(len(p)) != b.shardSize {
// All blocks except the last should be of length b.shardSize
logger.LogIf(context.Background(), errUnexpected)
return 0, errUnexpected
}
if len(p) == 0 {
return 0, nil
}
b.h.Reset()
b.h.Write(p)
hashBytes := b.h.Sum(nil)
n, err := b.iow.Write(hashBytes)
if n != len(hashBytes) {
logger.LogIf(context.Background(), err)
return 0, err
}
n, err = b.iow.Write(p)
b.currentBlockIdx++
return n, err
}

func (b *streamingBitrotWriter) Close() error {
err := b.iow.Close()
// Wait for all data to be written before returning else it causes race conditions.
// The race condition is due to the io.PipeWriter implementation, i.e. consider the following
// sequence of operations:
// 1) pipe.Write()
// 2) pipe.Close()
// Now pipe.Close() can return before the data is read on the other end of the pipe and written to the disk.
// Hence an immediate Read() on the file can return incorrect data.
<-b.canClose
return err
}

// Returns streaming bitrot writer implementation.
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
r, w := io.Pipe()
h := algo.New()
bw := &streamingBitrotWriter{w, h, shardSize, make(chan struct{}), 0, int(length / shardSize)}
go func() {
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
totalFileSize := bitrotSumsTotalSize + length
err := disk.CreateFile(volume, filePath, totalFileSize, r)
if err != nil {
logger.LogIf(context.Background(), err)
r.CloseWithError(err)
}
close(bw.canClose)
}()
return bw
}

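The goroutine above sizes CreateFile as checksums-plus-data. A quick sketch of that arithmetic; ceilFrac is restated here purely for illustration (the cmd package has its own helper):

```go
package main

import "fmt"

// ceilFrac rounds the division up, matching how many checksums are stored.
func ceilFrac(numerator, denominator int64) int64 {
	return (numerator + denominator - 1) / denominator
}

func main() {
	length, shardSize, hashSize := int64(35), int64(10), int64(32) // 32 = HighwayHash-256 sum size
	bitrotSumsTotalSize := ceilFrac(length, shardSize) * hashSize  // 4 sums -> 128 bytes
	fmt.Println(bitrotSumsTotalSize + length)                      // 163 bytes written to disk
}
```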
// ReadAt() implementation which verifies the bitrot hash available as part of the stream.
type streamingBitrotReader struct {
disk StorageAPI
rc io.ReadCloser
volume string
filePath string
tillOffset int64
currOffset int64
h hash.Hash
shardSize int64
hashBytes []byte
}

func (b *streamingBitrotReader) Close() error {
if b.rc == nil {
return nil
}
return b.rc.Close()
}

func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
var err error
if offset%b.shardSize != 0 {
// Offset should always be aligned to b.shardSize
logger.LogIf(context.Background(), errUnexpected)
return 0, errUnexpected
}
if b.rc == nil {
// For the first ReadAt() call we need to open the stream for reading.
b.currOffset = offset
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
b.rc, err = b.disk.ReadFileStream(b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
if err != nil {
logger.LogIf(context.Background(), err)
return 0, err
}
}
if offset != b.currOffset {
logger.LogIf(context.Background(), errUnexpected)
return 0, errUnexpected
}
b.h.Reset()
_, err = io.ReadFull(b.rc, b.hashBytes)
if err != nil {
logger.LogIf(context.Background(), err)
return 0, err
}
_, err = io.ReadFull(b.rc, buf)
if err != nil {
logger.LogIf(context.Background(), err)
return 0, err
}
b.h.Write(buf)

if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
err = hashMismatchError{hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))}
logger.LogIf(context.Background(), err)
return 0, err
}
b.currOffset += int64(len(buf))
return len(buf), nil
}

// Returns streaming bitrot reader implementation.
func newStreamingBitrotReader(disk StorageAPI, volume, filePath string, tillOffset int64, algo BitrotAlgorithm, shardSize int64) *streamingBitrotReader {
h := algo.New()
return &streamingBitrotReader{
disk,
nil,
volume,
filePath,
ceilFrac(tillOffset, shardSize)*int64(h.Size()) + tillOffset,
0,
h,
shardSize,
make([]byte, h.Size()),
}
}
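ReadAt above maps a logical data offset into the interleaved [sum][shard][sum][shard]... stream: every full shard before the offset is preceded by one checksum. A minimal sketch of that mapping:

```go
package main

import "fmt"

// streamOffset mirrors the expression in ReadAt above.
func streamOffset(logical, shardSize, hashSize int64) int64 {
	return (logical/shardSize)*hashSize + logical
}

func main() {
	// With 10-byte shards and 32-byte sums, logical offset 20 lies after
	// two [sum][shard] pairs: 2*32 + 20 = 84 bytes into the stream.
	fmt.Println(streamOffset(20, 10, 32)) // 84
}
```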
109 cmd/bitrot-whole.go Normal file
@@ -0,0 +1,109 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
"context"
"hash"
"io"

"github.com/minio/minio/cmd/logger"
)

// Implementation to calculate bitrot for the whole file.
type wholeBitrotWriter struct {
disk StorageAPI
volume string
filePath string
shardSize int64 // This is the shard size of the erasure logic
hash.Hash // For bitrot hash

// Following two fields are used only to make sure that Write(p) is called such that
// len(p) is always the block size except the last block, to prevent programmer errors.
currentBlockIdx int
lastBlockIdx int
}

func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
if b.currentBlockIdx < b.lastBlockIdx && int64(len(p)) != b.shardSize {
// All blocks except the last should be of length b.shardSize
logger.LogIf(context.Background(), errUnexpected)
return 0, errUnexpected
}
err := b.disk.AppendFile(b.volume, b.filePath, p)
if err != nil {
logger.LogIf(context.Background(), err)
return 0, err
}
_, err = b.Hash.Write(p)
if err != nil {
logger.LogIf(context.Background(), err)
return 0, err
}
b.currentBlockIdx++
return len(p), nil
}

func (b *wholeBitrotWriter) Close() error {
return nil
}

// Returns whole-file bitrot writer.
func newWholeBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
return &wholeBitrotWriter{disk, volume, filePath, shardSize, algo.New(), 0, int(length / shardSize)}
}

// Implementation to verify bitrot for the whole file.
type wholeBitrotReader struct {
disk StorageAPI
volume string
filePath string
verifier *BitrotVerifier // Holds the bit-rot info
tillOffset int64 // Affects the length of data requested in disk.ReadFile depending on Read()'s offset
buf []byte // Holds bit-rot verified data
}

func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error) {
if b.buf == nil {
b.buf = make([]byte, b.tillOffset-offset)
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
ctx := context.Background()
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
logger.LogIf(ctx, err)
return 0, err
}
}
if len(b.buf) < len(buf) {
logger.LogIf(context.Background(), errLessData)
return 0, errLessData
}
n = copy(buf, b.buf)
b.buf = b.buf[n:]
return n, nil
}

// Returns whole-file bitrot reader.
func newWholeBitrotReader(disk StorageAPI, volume, filePath string, algo BitrotAlgorithm, tillOffset int64, sum []byte) *wholeBitrotReader {
return &wholeBitrotReader{
disk: disk,
volume: volume,
filePath: filePath,
verifier: &BitrotVerifier{algo, sum},
tillOffset: tillOffset,
buf: nil,
}
}
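Note that wholeBitrotReader.ReadAt is stateful: the first call reads and verifies the entire range from offset to tillOffset, and later calls merely drain the already-verified b.buf, so callers must read strictly forward. A hedged sketch of that contract (assumes the cmd package context; all arguments are caller-supplied):

```go
// verifyTwoShards is illustrative only, not part of the tree.
func verifyTwoShards(disk StorageAPI, volume, filePath string, algo BitrotAlgorithm, tillOffset int64, sum []byte, shardSize int64) error {
	r := newWholeBitrotReader(disk, volume, filePath, algo, tillOffset, sum)
	buf := make([]byte, shardSize)
	if _, err := r.ReadAt(buf, 0); err != nil { // reads + verifies the whole range once
		return err
	}
	if _, err := r.ReadAt(buf, shardSize); err != nil { // served from the buffered remainder
		return err
	}
	return nil
}
```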
187 cmd/bitrot.go Normal file
@@ -0,0 +1,187 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
"context"
"errors"
"hash"
"io"

"github.com/minio/highwayhash"
"github.com/minio/minio/cmd/logger"
sha256 "github.com/minio/sha256-simd"
"golang.org/x/crypto/blake2b"
)

// magic HH-256 key as HH-256 hash of the first 100 decimals of π as utf-8 string with a zero key.
var magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0")

// BitrotAlgorithm specifies an algorithm used for bitrot protection.
type BitrotAlgorithm uint

const (
// SHA256 represents the SHA-256 hash function
SHA256 BitrotAlgorithm = 1 + iota
// HighwayHash256 represents the HighwayHash-256 hash function
HighwayHash256
// HighwayHash256S represents the Streaming HighwayHash-256 hash function
HighwayHash256S
// BLAKE2b512 represents the BLAKE2b-512 hash function
BLAKE2b512
)

// DefaultBitrotAlgorithm is the default algorithm used for bitrot protection.
const (
DefaultBitrotAlgorithm = HighwayHash256S
)

var bitrotAlgorithms = map[BitrotAlgorithm]string{
SHA256: "sha256",
BLAKE2b512: "blake2b",
HighwayHash256: "highwayhash256",
HighwayHash256S: "highwayhash256S",
}

// New returns a new hash.Hash calculating the given bitrot algorithm.
func (a BitrotAlgorithm) New() hash.Hash {
switch a {
case SHA256:
return sha256.New()
case BLAKE2b512:
b2, _ := blake2b.New512(nil) // New512 never returns an error if the key is nil
return b2
case HighwayHash256:
hh, _ := highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
return hh
case HighwayHash256S:
hh, _ := highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
return hh
default:
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
return nil
}
}

// Available reports whether the given algorithm is available.
func (a BitrotAlgorithm) Available() bool {
_, ok := bitrotAlgorithms[a]
return ok
}

// String returns the string identifier for a given bitrot algorithm.
// If the algorithm is not supported String panics.
func (a BitrotAlgorithm) String() string {
name, ok := bitrotAlgorithms[a]
if !ok {
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
}
return name
}

// NewBitrotVerifier returns a new BitrotVerifier implementing the given algorithm.
func NewBitrotVerifier(algorithm BitrotAlgorithm, checksum []byte) *BitrotVerifier {
return &BitrotVerifier{algorithm, checksum}
}

// BitrotVerifier can be used to verify protected data.
type BitrotVerifier struct {
algorithm BitrotAlgorithm
sum []byte
}

// BitrotAlgorithmFromString returns a bitrot algorithm from the given string representation.
// It returns 0 if the string representation does not match any supported algorithm.
// The zero value of a bitrot algorithm is never supported.
func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
for alg, name := range bitrotAlgorithms {
if name == s {
return alg
}
}
return
}

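Round-tripping an algorithm through its string name is what the lookup above enables. A minimal usage sketch, assuming the cmd package context:

```go
// sumOf is illustrative only, not part of the tree.
func sumOf(name string, data []byte) ([]byte, error) {
	algo := BitrotAlgorithmFromString(name) // e.g. "highwayhash256"
	if !algo.Available() {
		return nil, errors.New("unsupported bitrot algorithm: " + name)
	}
	h := algo.New()
	h.Write(data) // hash.Hash.Write never returns an error
	return h.Sum(nil), nil
}
```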
func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
if algo == HighwayHash256S {
return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize)
}
return newWholeBitrotWriter(disk, volume, filePath, length, algo, shardSize)
}

func newBitrotReader(disk StorageAPI, bucket string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) io.ReaderAt {
if algo == HighwayHash256S {
return newStreamingBitrotReader(disk, bucket, filePath, tillOffset, algo, shardSize)
}
return newWholeBitrotReader(disk, bucket, filePath, algo, tillOffset, sum)
}

// Close all the readers.
func closeBitrotReaders(rs []io.ReaderAt) {
for _, r := range rs {
if br, ok := r.(*streamingBitrotReader); ok {
br.Close()
}
}
}

// Close all the writers.
func closeBitrotWriters(ws []io.Writer) {
for _, w := range ws {
if bw, ok := w.(*streamingBitrotWriter); ok {
bw.Close()
}
}
}

// Returns hash sum for whole-bitrot, nil for streaming-bitrot.
func bitrotWriterSum(w io.Writer) []byte {
if bw, ok := w.(*wholeBitrotWriter); ok {
return bw.Sum(nil)
}
return nil
}

// Verify if a file has bitrot error.
func bitrotCheckFile(disk StorageAPI, volume string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) (err error) {
if algo != HighwayHash256S {
buf := []byte{}
// For whole-file bitrot we don't need to read the entire file as the bitrot verify happens on the server side even if we read 0-bytes.
_, err = disk.ReadFile(volume, filePath, 0, buf, NewBitrotVerifier(algo, sum))
return err
}
buf := make([]byte, shardSize)
r := newStreamingBitrotReader(disk, volume, filePath, tillOffset, algo, shardSize)
defer closeBitrotReaders([]io.ReaderAt{r})
var offset int64
for {
if offset == tillOffset {
break
}
var n int
tmpBuf := buf
if int64(len(tmpBuf)) > (tillOffset - offset) {
tmpBuf = tmpBuf[:(tillOffset - offset)]
}
n, err = r.ReadAt(tmpBuf, offset)
if err != nil {
return err
}
offset += int64(n)
}
return nil
}
84 cmd/bitrot_test.go Normal file
@@ -0,0 +1,84 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
"io"
"io/ioutil"
"log"
"os"
"testing"
)

func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
tmpDir, err := ioutil.TempDir("", "")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(tmpDir)

volume := "testvol"
filePath := "testfile"

disk, err := newPosix(tmpDir)
if err != nil {
t.Fatal(err)
}

disk.MakeVol(volume)

writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)

_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
}
_, err = writer.Write([]byte("aaaaa"))
if err != nil {
log.Fatal(err)
}
writer.(io.Closer).Close()

reader := newBitrotReader(disk, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
b := make([]byte, 10)
if _, err = reader.ReadAt(b, 0); err != nil {
log.Fatal(err)
}
if _, err = reader.ReadAt(b, 10); err != nil {
log.Fatal(err)
}
if _, err = reader.ReadAt(b, 20); err != nil {
log.Fatal(err)
}
if _, err = reader.ReadAt(b[:5], 30); err != nil {
log.Fatal(err)
}
}

func TestAllBitrotAlgorithms(t *testing.T) {
for bitrotAlgo := range bitrotAlgorithms {
testBitrotReaderWriterAlgo(t, bitrotAlgo)
}
}
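The 35-byte payload over 10-byte shards deliberately exercises the short final block: verifyTillIdx (or lastBlockIdx) is length/shardSize = 3, so the len(p) == shardSize guard applies to blocks 0-2 and is skipped for the trailing 5-byte Write. A sketch of that index arithmetic:

```go
package main

import "fmt"

func main() {
	length, shardSize := int64(35), int64(10)
	lastBlockIdx := int(length / shardSize) // 3: blocks 0, 1, 2 must be exactly shardSize
	fmt.Println(lastBlockIdx)
	// The fourth Write in the test above (5 bytes) is block index 3,
	// so the full-shard length check is intentionally not enforced for it.
}
```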
@@ -56,11 +56,12 @@ func (bf *BoolFlag) UnmarshalJSON(data []byte) (err error) {

// ParseBoolFlag - parses string into BoolFlag.
func ParseBoolFlag(s string) (bf BoolFlag, err error) {
if s == "on" {
switch s {
case "on":
bf = true
} else if s == "off" {
case "off":
bf = false
} else {
default:
err = fmt.Errorf("invalid value ‘%s’ for BoolFlag", s)
}

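The if/else chain becomes a switch with identical semantics. A usage sketch, assuming BoolFlag's underlying type is bool as the assignments above suggest:

```go
// exampleBoolFlag is illustrative only, not part of the tree.
func exampleBoolFlag() error {
	bf, err := ParseBoolFlag("on") // only "on" and "off" parse successfully
	if err != nil {
		return err
	}
	fmt.Printf("flag enabled: %v\n", bf)
	return nil
}
```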
@@ -20,6 +20,9 @@ import (
"net/http"

"github.com/gorilla/mux"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"

"github.com/minio/minio/pkg/policy"
)

@@ -56,17 +59,19 @@ func validateListObjectsArgs(prefix, marker, delimiter string, maxKeys int) APIE
func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectsV2")

defer logger.AuditLog(w, r, "ListObjectsV2", mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

@@ -74,18 +79,18 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http

// Extract all the listObjectsV2 query params to their native values.
prefix, token, startAfter, delimiter, fetchOwner, maxKeys, _, errCode := getListObjectsV2Args(urlValues)

if errCode != ErrNone {
writeErrorResponse(w, errCode, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}

// Validate the query params before beginning to serve the request.
// fetch-owner is not validated since it is a boolean
if s3Error := validateListObjectsArgs(prefix, token, delimiter, maxKeys); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

listObjectsV2 := objectAPI.ListObjectsV2
if api.CacheAPI() != nil {
listObjectsV2 = api.CacheAPI().ListObjectsV2
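Every handler hunk in this file repeats the same mechanical migration: resolve the numeric APIErrorCode to a full APIError up front, and thread the request context plus a browser heuristic through each error path. An illustrative sketch (exampleHandler is not part of the tree):

```go
func (api objectAPIHandlers) exampleHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "Example")
	if api.ObjectAPI() == nil {
		// Before: writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		// After: context, resolved APIError, and the browser hint are explicit.
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}
	writeSuccessResponseHeadersOnly(w)
}
```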
@@ -95,15 +100,26 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
// marshaled into S3 compatible XML header.
listObjectsV2Info, err := listObjectsV2(ctx, bucket, prefix, token, delimiter, maxKeys, fetchOwner, startAfter)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

for i := range listObjectsV2Info.Objects {
if listObjectsV2Info.Objects[i].IsEncrypted() {
var actualSize int64
if listObjectsV2Info.Objects[i].IsCompressed() {
// Read the decompressed size from the meta.json.
actualSize = listObjectsV2Info.Objects[i].GetActualSize()
if actualSize < 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDecompressedSize), r.URL, guessIsBrowserReq(r))
return
}
// Set the info.Size to the actualSize.
listObjectsV2Info.Objects[i].Size = actualSize
} else if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].DecryptedSize()
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
@@ -125,56 +141,69 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectsV1")

defer logger.AuditLog(w, r, "ListObjectsV1", mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

// Extract all the listObjectsV1 query params to their native values.
prefix, marker, delimiter, maxKeys, _ := getListObjectsV1Args(r.URL.Query())

// Validate the maxKeys lowerbound. When maxKeys > 1000, S3 returns 1000 but
// does not throw an error.
if maxKeys < 0 {
writeErrorResponse(w, ErrInvalidMaxKeys, r.URL)
return
} // Validate all the query params before beginning to serve the request.
if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
prefix, marker, delimiter, maxKeys, _, s3Error := getListObjectsV1Args(r.URL.Query())
if s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

// Validate all the query params before beginning to serve the request.
if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

listObjects := objectAPI.ListObjects
if api.CacheAPI() != nil {
listObjects = api.CacheAPI().ListObjects
}

// Initiate a list objects operation based on the input params.
// On success would return back ListObjectsInfo object to be
// marshaled into S3 compatible XML header.
listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

for i := range listObjectsInfo.Objects {
if listObjectsInfo.Objects[i].IsEncrypted() {
var actualSize int64
if listObjectsInfo.Objects[i].IsCompressed() {
// Read the decompressed size from the meta.json.
actualSize = listObjectsInfo.Objects[i].GetActualSize()
if actualSize < 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDecompressedSize), r.URL, guessIsBrowserReq(r))
return
}
// Set the info.Size to the actualSize.
listObjectsInfo.Objects[i].Size = actualSize
} else if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false)
listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].DecryptedSize()
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
}
}

response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, maxKeys, listObjectsInfo)

// Write success response.

@@ -32,6 +32,7 @@ import (
"github.com/gorilla/mux"

"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/dns"
"github.com/minio/minio/pkg/event"
@@ -88,17 +89,19 @@ func initFederatorBackend(objLayer ObjectLayer) {
func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketLocation")

defer logger.AuditLog(w, r, "GetBucketLocation", mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketLocationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

@@ -107,7 +110,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
getBucketInfo = api.CacheAPI().GetBucketInfo
}
if _, err := getBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

@@ -136,36 +139,44 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListMultipartUploads")

defer logger.AuditLog(w, r, "ListMultipartUploads", mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketMultipartUploadsAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, _ := getBucketMultipartResources(r.URL.Query())
if maxUploads < 0 {
writeErrorResponse(w, ErrInvalidMaxUploads, r.URL)
prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, _, errCode := getBucketMultipartResources(r.URL.Query())
if errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}

if maxUploads < 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxUploads), r.URL, guessIsBrowserReq(r))
return
}

if keyMarker != "" {
// Marker not common with prefix is not implemented.
if !hasPrefix(keyMarker, prefix) {
writeErrorResponse(w, ErrNotImplemented, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
}

listMultipartsInfo, err := objectAPI.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// generate response
@@ -183,27 +194,30 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListBuckets")

defer logger.AuditLog(w, r, "ListBuckets", mustGetClaimsFromToken(r))

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
listBuckets := objectAPI.ListBuckets

listBuckets := objectAPI.ListBuckets
if api.CacheAPI() != nil {
listBuckets = api.CacheAPI().ListBuckets
}

if s3Error := checkRequestAuthType(ctx, r, policy.ListAllMyBucketsAction, "", ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

// If etcd, dns federation configured list buckets from etcd.
var bucketsInfo []BucketInfo
if globalDNSConfig != nil {
dnsBuckets, err := globalDNSConfig.List()
if err != nil && err != dns.ErrNoEntriesFound {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
bucketSet := set.NewStringSet()
@@ -222,7 +236,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
var err error
bucketsInfo, err = listBuckets(ctx)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
@@ -239,12 +253,14 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteMultipleObjects")

defer logger.AuditLog(w, r, "DeleteMultipleObjects", mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

@@ -253,7 +269,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// In the event access is denied, a 200 response should still be returned
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if s3Error != ErrAccessDenied {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
}
@@ -261,14 +277,14 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Content-Length is required and should be non-zero
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if r.ContentLength <= 0 {
writeErrorResponse(w, ErrMissingContentLength, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
return
}

// Content-Md5 is required and should be set
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if _, ok := r.Header["Content-Md5"]; !ok {
writeErrorResponse(w, ErrMissingContentMD5, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
return
}

@@ -284,7 +300,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Read incoming body XML bytes.
if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrInternalError, r.URL)
writeErrorResponse(ctx, w, toAdminAPIErr(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

@@ -292,7 +308,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
deleteObjects := &DeleteObjectsRequest{}
if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrMalformedXML, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
return
}

@@ -300,7 +316,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
if globalWORMEnabled {
// Not required to check whether given objects exist or not, because
// DeleteMultipleObject is always successful irrespective of object existence.
writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
return
}

@@ -339,10 +355,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
deletedObjects = append(deletedObjects, object)
continue
}
apiErr := toAPIError(ctx, err)
// Error during delete should be collected separately.
deleteErrors = append(deleteErrors, DeleteError{
Code: errorCodeResponse[toAPIErrorCode(err)].Code,
Message: errorCodeResponse[toAPIErrorCode(err)].Description,
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
})
}
@@ -369,10 +386,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
Object: ObjectInfo{
Name: dobj.ObjectName,
},
ReqParams: extractReqParams(r),
UserAgent: r.UserAgent(),
Host: host,
Port: port,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: host,
Port: port,
})
}
}
@@ -383,9 +401,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucket")

defer logger.AuditLog(w, r, "PutBucket", mustGetClaimsFromToken(r))

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

@@ -393,21 +413,21 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
bucket := vars["bucket"]

if s3Error := checkRequestAuthType(ctx, r, policy.CreateBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

// Parse incoming location constraint.
location, s3Error := parseLocationConstraint(r)
if s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

// Validate if location sent by the client is valid, reject
// requests which do not follow valid region requirements.
if !isValidLocation(location) {
writeErrorResponse(w, ErrInvalidRegion, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRegion), r.URL, guessIsBrowserReq(r))
return
}

@@ -416,12 +436,12 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
if err == dns.ErrNoEntriesFound {
// Proceed to creating a bucket.
if err = objectAPI.MakeBucketWithLocation(ctx, bucket, location); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err = globalDNSConfig.Put(bucket); err != nil {
objectAPI.DeleteBucket(ctx, bucket)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

@@ -431,18 +451,18 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
writeSuccessResponseHeadersOnly(w)
return
}
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return

}
writeErrorResponse(w, ErrBucketAlreadyOwnedByYou, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBucketAlreadyOwnedByYou), r.URL, guessIsBrowserReq(r))
return
}

// Proceed to creating a bucket.
err := objectAPI.MakeBucketWithLocation(ctx, bucket, location)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

@@ -459,9 +479,21 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PostPolicyBucket")

defer logger.AuditLog(w, r, "PostPolicyBucket", mustGetClaimsFromToken(r))

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}

if !api.EncryptionEnabled() && hasServerSideEncryptionHeader(r.Header) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}

@@ -470,17 +502,17 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// Require Content-Length to be set in the request
size := r.ContentLength
if size < 0 {
writeErrorResponse(w, ErrMissingContentLength, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
return
}
resource, err := getResource(r.URL.Path, r.Host, globalDomainName)
if err != nil {
writeErrorResponse(w, ErrInvalidRequest, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL, guessIsBrowserReq(r))
return
}
// Make sure that the URL does not contain object name.
if bucket != filepath.Clean(resource[1:]) {
writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
return
}

@@ -489,7 +521,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
reader, err := r.MultipartReader()
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
return
}

@@ -497,7 +529,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
form, err := reader.ReadForm(maxFormMemory)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
return
}

@@ -508,13 +540,13 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(ctx, form)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
return
}

// Check if file is provided, error out otherwise.
if fileBody == nil {
writeErrorResponse(w, ErrPOSTFileRequired, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPOSTFileRequired), r.URL, guessIsBrowserReq(r))
return
}

@@ -536,92 +568,112 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
if successRedirect != "" {
redirectURL, err = url.Parse(successRedirect)
if err != nil {
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
return
}
}

// Verify policy signature.
apiErr := doesPolicySignatureMatch(formValues)
if apiErr != ErrNone {
writeErrorResponse(w, apiErr, r.URL)
errCode := doesPolicySignatureMatch(formValues)
if errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}

policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
if err != nil {
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
return
}

postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
if err != nil {
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
return
}

// Make sure formValues adhere to policy restrictions.
if apiErr = checkPostPolicy(formValues, postPolicyForm); apiErr != ErrNone {
writeErrorResponse(w, apiErr, r.URL)
return
}

// Ensure that the object size is within expected range, also the file size
// should not exceed the maximum single Put size (5 GiB)
lengthRange := postPolicyForm.Conditions.ContentLengthRange
if lengthRange.Valid {
if fileSize < lengthRange.Min {
writeErrorResponse(w, toAPIErrorCode(errDataTooSmall), r.URL)
// Handle policy if it is set.
if len(policyBytes) > 0 {
postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
return
}

if fileSize > lengthRange.Max || isMaxObjectSize(fileSize) {
writeErrorResponse(w, toAPIErrorCode(errDataTooLarge), r.URL)
// Make sure formValues adhere to policy restrictions.
if errCode = checkPostPolicy(formValues, postPolicyForm); errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}

// Ensure that the object size is within expected range, also the file size
// should not exceed the maximum single Put size (5 GiB)
lengthRange := postPolicyForm.Conditions.ContentLengthRange
if lengthRange.Valid {
if fileSize < lengthRange.Min {
writeErrorResponse(ctx, w, toAPIError(ctx, errDataTooSmall), r.URL, guessIsBrowserReq(r))
return
}

if fileSize > lengthRange.Max || isMaxObjectSize(fileSize) {
writeErrorResponse(ctx, w, toAPIError(ctx, errDataTooLarge), r.URL, guessIsBrowserReq(r))
return
}
}
}

// Extract metadata to be saved from received Form.
metadata := make(map[string]string)
err = extractMetadataFromMap(ctx, formValues, metadata)
if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

hashReader, err := hash.NewReader(fileBody, fileSize, "", "")
hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
rawReader := hashReader
pReader := NewPutObjReader(rawReader, nil, nil)
var objectEncryptionKey []byte

// This request header needs to be set prior to setting ObjectOptions
if globalAutoEncryption && !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
}
// get gateway encryption options
var opts ObjectOptions
opts, err = putOpts(ctx, r, bucket, object, metadata)
if err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
if objectAPI.IsEncryptionSupported() {
if hasSSECustomerHeader(formValues) && !hasSuffix(object, slashSeparator) { // handle SSE-C requests
if hasServerSideEncryptionHeader(formValues) && !hasSuffix(object, slashSeparator) { // handle SSE-C and SSE-S3 requests
var reader io.Reader
var key []byte
key, err = ParseSSECustomerHeader(formValues)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
if crypto.SSEC.IsRequested(formValues) {
key, err = ParseSSECustomerHeader(formValues)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
reader, err = newEncryptReader(hashReader, key, bucket, object, metadata)
reader, objectEncryptionKey, err = newEncryptReader(hashReader, key, bucket, object, metadata, crypto.S3.IsRequested(formValues))
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
info := ObjectInfo{Size: fileSize}
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "") // do not try to verify encrypted content
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize) // do not try to verify encrypted content
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
pReader = NewPutObjReader(rawReader, hashReader, objectEncryptionKey)
}
}

objInfo, err := objectAPI.PutObject(ctx, bucket, object, hashReader, metadata)
objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, opts)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

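The POST-policy path now mirrors the regular PutObject flow: the raw checksum-verifying reader is wrapped in a PutObjReader, and only the encrypted branch layers a ciphertext hash reader plus the object encryption key on top. A condensed sketch of the layering, using the names from the hunk above:

```go
// Plaintext path: verify the client-supplied checksums only.
rawReader := hashReader
pReader := NewPutObjReader(rawReader, nil, nil)

// SSE path (inside the IsEncryptionSupported branch): encrypt, then
// hashReader is rebuilt over the ciphertext, and the key travels along:
//   pReader = NewPutObjReader(rawReader, hashReader, objectEncryptionKey)
```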
@@ -637,13 +689,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h

// Notify object created event.
defer sendEvent(eventArgs{
EventName: event.ObjectCreatedPost,
BucketName: objInfo.Bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
UserAgent: r.UserAgent(),
Host: host,
Port: port,
EventName: event.ObjectCreatedPost,
BucketName: objInfo.Bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: host,
Port: port,
})

if successRedirect != "" {
@@ -679,17 +732,19 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "HeadBucket")

defer logger.AuditLog(w, r, "HeadBucket", mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponseHeadersOnly(w, ErrServerNotInitialized)
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrServerNotInitialized))
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponseHeadersOnly(w, s3Error)
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
return
}

@@ -698,7 +753,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
getBucketInfo = api.CacheAPI().GetBucketInfo
}
if _, err := getBucketInfo(ctx, bucket); err != nil {
writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}

@@ -709,17 +764,19 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucket")

defer logger.AuditLog(w, r, "DeleteBucket", mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

@@ -729,7 +786,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
}
// Attempt to delete bucket.
if err := deleteBucket(ctx, bucket); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

@@ -741,7 +798,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
if err := globalDNSConfig.Delete(bucket); err != nil {
// Deleting DNS entry failed, attempt to create the bucket again.
objectAPI.MakeBucketWithLocation(ctx, bucket, "")
writeErrorResponse(w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}

@@ -84,7 +84,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
		// initialize httptest Recorder, this records any mutations to response writer inside the handler.
		rec := httptest.NewRecorder()
		// construct HTTP request for Get bucket location.
-		req, err := newTestSignedRequestV4("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey)
+		req, err := newTestSignedRequestV4("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
		if err != nil {
			t.Fatalf("Test %d: %s: Failed to create HTTP request for GetBucketLocationHandler: <ERROR> %v", i+1, instanceType, err)
		}
@@ -95,12 +95,12 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
			t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
		}
		if !bytes.Equal(testCase.locationResponse, rec.Body.Bytes()) && testCase.shouldPass {
-			t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.locationResponse), string(rec.Body.Bytes()))
+			t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.locationResponse), rec.Body.String())
		}
		errorResponse := APIErrorResponse{}
		err = xml.Unmarshal(rec.Body.Bytes(), &errorResponse)
		if err != nil && !testCase.shouldPass {
-			t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, string(rec.Body.Bytes()))
+			t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, rec.Body.String())
		}
		if errorResponse.Resource != testCase.errorResponse.Resource {
			t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
@@ -116,7 +116,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
		// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
		recV2 := httptest.NewRecorder()
		// construct HTTP request for PUT bucket policy endpoint.
-		reqV2, err := newTestSignedRequestV2("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey)
+		reqV2, err := newTestSignedRequestV2("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)

		if err != nil {
			t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@@ -131,7 +131,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
		errorResponse = APIErrorResponse{}
		err = xml.Unmarshal(recV2.Body.Bytes(), &errorResponse)
		if err != nil && !testCase.shouldPass {
-			t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, string(recV2.Body.Bytes()))
+			t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, recV2.Body.String())
		}
		if errorResponse.Resource != testCase.errorResponse.Resource {
			t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
@@ -220,7 +220,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
		// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
		rec := httptest.NewRecorder()
		// construct HTTP request for HEAD bucket.
-		req, err := newTestSignedRequestV4("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey)
+		req, err := newTestSignedRequestV4("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
		if err != nil {
			t.Fatalf("Test %d: %s: Failed to create HTTP request for HeadBucketHandler: <ERROR> %v", i+1, instanceType, err)
		}
@@ -235,7 +235,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
		// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
		recV2 := httptest.NewRecorder()
		// construct HTTP request for PUT bucket policy endpoint.
-		reqV2, err := newTestSignedRequestV2("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey)
+		reqV2, err := newTestSignedRequestV2("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)

		if err != nil {
			t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@@ -437,7 +437,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s

		// construct HTTP request for List multipart uploads endpoint.
		u := getListMultipartUploadsURLWithParams("", testCase.bucket, testCase.prefix, testCase.keyMarker, testCase.uploadIDMarker, testCase.delimiter, testCase.maxUploads)
-		req, gerr := newTestSignedRequestV4("GET", u, 0, nil, testCase.accessKey, testCase.secretKey)
+		req, gerr := newTestSignedRequestV4("GET", u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
		if gerr != nil {
			t.Fatalf("Test %d: %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", i+1, instanceType, gerr)
		}
@@ -454,7 +454,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
		// construct HTTP request for PUT bucket policy endpoint.

		// verify response for V2 signed HTTP request.
-		reqV2, err := newTestSignedRequestV2("GET", u, 0, nil, testCase.accessKey, testCase.secretKey)
+		reqV2, err := newTestSignedRequestV2("GET", u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
		if err != nil {
			t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
		}
@@ -471,7 +471,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s

	// construct HTTP request for List multipart uploads endpoint.
	u := getListMultipartUploadsURLWithParams("", bucketName, "", "", "", "", "")
-	req, err := newTestSignedRequestV4("GET", u, 0, nil, "", "") // Generate an anonymous request.
+	req, err := newTestSignedRequestV4("GET", u, 0, nil, "", "", nil) // Generate an anonymous request.
	if err != nil {
		t.Fatalf("Test %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", instanceType, err)
	}
@@ -551,7 +551,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
	for i, testCase := range testCases {
		// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
		rec := httptest.NewRecorder()
-		req, lerr := newTestSignedRequestV4("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey)
+		req, lerr := newTestSignedRequestV4("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
		if lerr != nil {
			t.Fatalf("Test %d: %s: Failed to create HTTP request for ListBucketsHandler: <ERROR> %v", i+1, instanceType, lerr)
		}
@@ -568,7 +568,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
		// construct HTTP request for PUT bucket policy endpoint.

		// verify response for V2 signed HTTP request.
-		reqV2, err := newTestSignedRequestV2("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey)
+		reqV2, err := newTestSignedRequestV2("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)

		if err != nil {
			t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
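Two cleanups repeat through these test hunks: the signed-request helpers take one extra trailing argument (passed as nil everywhere here; this diff does not show the helper's definition, so its meaning is not guessed at), and string(rec.Body.Bytes()) becomes the equivalent rec.Body.String(). The standard-library snippet below demonstrates the recorder idiom in isolation.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// A trivial handler standing in for the bucket handlers under test.
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "<LocationConstraint/>")
	})

	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, httptest.NewRequest("GET", "/bucket?location", nil))

	// rec.Body is a *bytes.Buffer, so Body.String() is the direct
	// replacement for the older string(rec.Body.Bytes()) spelling.
	fmt.Println(rec.Code, rec.Body.String())
}
```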
@@ -625,7 +625,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
	for i := 0; i < 10; i++ {
		objectName := "test-object-" + strconv.Itoa(i)
		// uploading the object.
-		_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil)
+		_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
		// if object upload fails stop the test.
		if err != nil {
			t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)
@@ -645,8 +645,8 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
	getDeleteErrorList := func(objects []ObjectIdentifier) (deleteErrorList []DeleteError) {
		for _, obj := range objects {
			deleteErrorList = append(deleteErrorList, DeleteError{
-				Code:    errorCodeResponse[ErrAccessDenied].Code,
-				Message: errorCodeResponse[ErrAccessDenied].Description,
+				Code:    errorCodes[ErrAccessDenied].Code,
+				Message: errorCodes[ErrAccessDenied].Description,
				Key:     obj.ObjectName,
			})
		}
@@ -745,7 +745,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
		// Generate a signed or anonymous request based on the testCase
		if testCase.accessKey != "" {
			req, err = newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", bucketName),
-				int64(len(testCase.objects)), bytes.NewReader(testCase.objects), testCase.accessKey, testCase.secretKey)
+				int64(len(testCase.objects)), bytes.NewReader(testCase.objects), testCase.accessKey, testCase.secretKey, nil)
		} else {
			req, err = newTestRequest("POST", getDeleteMultipleObjectsURL("", bucketName),
				int64(len(testCase.objects)), bytes.NewReader(testCase.objects))
@@ -785,7 +785,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
	nilBucket := "dummy-bucket"
	nilObject := ""

-	nilReq, err := newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", nilBucket), 0, nil, "", "")
+	nilReq, err := newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", nilBucket), 0, nil, "", "", nil)
	if err != nil {
		t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
	}
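The expected-error construction switches from the old errorCodeResponse map to the errorCodes table, still indexed by an API error code and still exposing Code and Description fields. A runnable sketch with stand-in types (the real table lives in MinIO's cmd package) shows the shape of that lookup:

```go
package main

import "fmt"

// APIErrorCode and errorCodes are stand-ins for MinIO's definitions; the
// diff only shows that the table is indexed by error code as before.
type APIErrorCode int

const ErrAccessDenied APIErrorCode = iota

type APIError struct {
	Code        string
	Description string
}

var errorCodes = map[APIErrorCode]APIError{
	ErrAccessDenied: {"AccessDenied", "Access Denied."},
}

// DeleteError mirrors the S3 multi-delete per-object error entry.
type DeleteError struct {
	Code    string
	Message string
	Key     string
}

func main() {
	// Building the expected error list the same way the updated test does.
	objects := []string{"test-object-0", "test-object-1"}
	var deleteErrorList []DeleteError
	for _, name := range objects {
		deleteErrorList = append(deleteErrorList, DeleteError{
			Code:    errorCodes[ErrAccessDenied].Code,
			Message: errorCodes[ErrAccessDenied].Description,
			Key:     name,
		})
	}
	fmt.Println(deleteErrorList)
}
```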
@@ -44,28 +44,30 @@ var errNoSuchNotifications = errors.New("The specified bucket does not have buck
func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetBucketNotification")
+
+	defer logger.AuditLog(w, r, "GetBucketNotification", mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucketName := vars["bucket"]

	objAPI := api.ObjectAPI()
	if objAPI == nil {
-		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if !objAPI.IsNotificationSupported() {
-		writeErrorResponse(w, ErrNotImplemented, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
-		writeErrorResponse(w, s3Error, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	_, err := objAPI.GetBucketInfo(ctx, bucketName)
	if err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -74,17 +76,21 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
	if err != nil {
		// Ignore errNoSuchNotifications to comply with AWS S3.
		if err != errNoSuchNotifications {
-			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

		nConfig = &event.Config{}
	}

+	// If xml namespace is empty, set a default value before returning.
+	if nConfig.XMLNS == "" {
+		nConfig.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
+	}
+
	notificationBytes, err := xml.Marshal(nConfig)
	if err != nil {
		logger.LogIf(ctx, err)
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -96,14 +102,16 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PutBucketNotification")
+
+	defer logger.AuditLog(w, r, "PutBucketNotification", mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
-		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if !objectAPI.IsNotificationSupported() {
-		writeErrorResponse(w, ErrNotImplemented, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -111,36 +119,36 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
	bucketName := vars["bucket"]

	if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
-		writeErrorResponse(w, s3Error, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	_, err := objectAPI.GetBucketInfo(ctx, bucketName)
	if err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// PutBucketNotification always needs a Content-Length.
	if r.ContentLength <= 0 {
-		writeErrorResponse(w, ErrMissingContentLength, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
		return
	}

	var config *event.Config
	config, err = event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalServerConfig.GetRegion(), globalNotificationSys.targetList)
	if err != nil {
-		apiErr := ErrMalformedXML
+		apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
		if event.IsEventError(err) {
-			apiErr = toAPIErrorCode(err)
+			apiErr = toAPIError(ctx, err)
		}

-		writeErrorResponse(w, apiErr, r.URL)
+		writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
		return
	}

-	if err = saveNotificationConfig(objectAPI, bucketName, config); err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+	if err = saveNotificationConfig(ctx, objectAPI, bucketName, config); err != nil {
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -156,22 +164,29 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListenBucketNotification")
+
+	defer logger.AuditLog(w, r, "ListenBucketNotification", mustGetClaimsFromToken(r))

	// Validate if bucket exists.
	objAPI := api.ObjectAPI()
	if objAPI == nil {
-		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
-		return
-	}
-	if !objAPI.IsNotificationSupported() {
-		writeErrorResponse(w, ErrNotImplemented, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

+	if !objAPI.IsNotificationSupported() {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	if !objAPI.IsListenBucketSupported() {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
+		return
+	}
	vars := mux.Vars(r)
	bucketName := vars["bucket"]

	if s3Error := checkRequestAuthType(ctx, r, policy.ListenBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
-		writeErrorResponse(w, s3Error, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -179,11 +194,13 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit

	var prefix string
	if len(values["prefix"]) > 1 {
-		writeErrorResponse(w, ErrFilterNamePrefix, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrFilterNamePrefix), r.URL, guessIsBrowserReq(r))
		return
	}

	if len(values["prefix"]) == 1 {
		if err := event.ValidateFilterRuleValue(values["prefix"][0]); err != nil {
-			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

@@ -192,11 +209,13 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit

	var suffix string
	if len(values["suffix"]) > 1 {
-		writeErrorResponse(w, ErrFilterNameSuffix, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrFilterNameSuffix), r.URL, guessIsBrowserReq(r))
		return
	}

	if len(values["suffix"]) == 1 {
		if err := event.ValidateFilterRuleValue(values["suffix"][0]); err != nil {
-			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

@@ -209,7 +228,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
	for _, s := range values["events"] {
		eventName, err := event.ParseName(s)
		if err != nil {
-			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

@@ -217,19 +236,19 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
	}

	if _, err := objAPI.GetBucketInfo(ctx, bucketName); err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	host, err := xnet.ParseHost(r.RemoteAddr)
	if err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	target, err := target.NewHTTPClientTarget(*host, w)
	if err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -237,8 +256,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit

	if err = globalNotificationSys.AddRemoteTarget(bucketName, target, rulesMap); err != nil {
		logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
-		logger.LogIf(ctx, err)
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	defer globalNotificationSys.RemoveRemoteTarget(bucketName, target.ID())
@@ -246,14 +264,13 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit

	thisAddr, err := xnet.ParseHost(GetLocalPeer(globalEndpoints))
	if err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if err = SaveListener(objAPI, bucketName, eventNames, pattern, target.ID(), *thisAddr); err != nil {
		logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
-		logger.LogIf(ctx, err)
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -263,8 +280,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit

	if err = RemoveListener(objAPI, bucketName, target.ID(), *thisAddr); err != nil {
		logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
-		logger.LogIf(ctx, err)
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
}
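ListenBucketNotification rejects repeated prefix or suffix filter rules before it validates the values themselves via event.ValidateFilterRuleValue. The sketch below reproduces just the multiplicity check with standard-library types; the value-level validation is out of scope here, so treat this as an illustration rather than MinIO's exact logic.

```go
package main

import (
	"fmt"
	"net/url"
)

// singleFilterRule mirrors the handler's rule: at most one value is allowed
// per filter name ("prefix" or "suffix"); zero values is fine and yields "".
func singleFilterRule(values url.Values, name string) (string, error) {
	if len(values[name]) > 1 {
		return "", fmt.Errorf("only a single %s filter rule is allowed", name)
	}
	if len(values[name]) == 1 {
		return values[name][0], nil
	}
	return "", nil
}

func main() {
	values, _ := url.ParseQuery("prefix=photos/&suffix=.jpg&events=s3:ObjectCreated:*")
	prefix, err := singleFilterRule(values, "prefix")
	suffix, _ := singleFilterRule(values, "suffix")
	fmt.Println(prefix, suffix, err)
}
```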
@@ -40,9 +40,11 @@ const (
func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PutBucketPolicy")
+
+	defer logger.AuditLog(w, r, "PutBucketPolicy", mustGetClaimsFromToken(r))

	objAPI := api.ObjectAPI()
	if objAPI == nil {
-		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -50,43 +52,43 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
	bucket := vars["bucket"]

	if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
-		writeErrorResponse(w, s3Error, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket exists.
	if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Error out if Content-Length is missing.
	// PutBucketPolicy always needs Content-Length.
	if r.ContentLength <= 0 {
-		writeErrorResponse(w, ErrMissingContentLength, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
		return
	}

	// Error out if Content-Length is beyond allowed size.
	if r.ContentLength > maxBucketPolicySize {
-		writeErrorResponse(w, ErrEntityTooLarge, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL, guessIsBrowserReq(r))
		return
	}

	bucketPolicy, err := policy.ParseConfig(io.LimitReader(r.Body, r.ContentLength), bucket)
	if err != nil {
-		writeErrorResponse(w, ErrMalformedPolicy, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL, guessIsBrowserReq(r))
		return
	}

	// Version in policy must not be empty
	if bucketPolicy.Version == "" {
-		writeErrorResponse(w, ErrMalformedPolicy, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL, guessIsBrowserReq(r))
		return
	}

	if err = objAPI.SetBucketPolicy(ctx, bucket, bucketPolicy); err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -101,9 +103,11 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DeleteBucketPolicy")
+
+	defer logger.AuditLog(w, r, "DeleteBucketPolicy", mustGetClaimsFromToken(r))

	objAPI := api.ObjectAPI()
	if objAPI == nil {
-		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -111,18 +115,18 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
	bucket := vars["bucket"]

	if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketPolicyAction, bucket, ""); s3Error != ErrNone {
-		writeErrorResponse(w, s3Error, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket exists.
	if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if err := objAPI.DeleteBucketPolicy(ctx, bucket); err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -137,9 +141,11 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetBucketPolicy")
+
+	defer logger.AuditLog(w, r, "GetBucketPolicy", mustGetClaimsFromToken(r))

	objAPI := api.ObjectAPI()
	if objAPI == nil {
-		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -147,27 +153,26 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
	bucket := vars["bucket"]

	if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
-		writeErrorResponse(w, s3Error, r.URL)
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket exists.
	if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Read bucket access policy.
	bucketPolicy, err := objAPI.GetBucketPolicy(ctx, bucket)
	if err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	policyData, err := json.Marshal(bucketPolicy)
	if err != nil {
		logger.LogIf(ctx, err)
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
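PutBucketPolicy guards the request body twice before parsing: a missing or non-positive Content-Length yields ErrMissingContentLength, anything above maxBucketPolicySize yields ErrEntityTooLarge, and the actual read is bounded by io.LimitReader. A standalone sketch of those guards follows; the 20 KiB cap is an assumption for illustration, since the limit's value does not appear in this diff.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

// Assumed cap for the sketch; MinIO defines its own maxBucketPolicySize.
const maxBucketPolicySize = 20 * 1024

// readPolicyBody mirrors the handler's guards: reject a missing or
// non-positive Content-Length, reject oversized bodies, and bound the read
// by the declared length so a lying client cannot stream extra data.
func readPolicyBody(r *http.Request) ([]byte, error) {
	if r.ContentLength <= 0 {
		return nil, fmt.Errorf("missing content length")
	}
	if r.ContentLength > maxBucketPolicySize {
		return nil, fmt.Errorf("policy document too large")
	}
	return io.ReadAll(io.LimitReader(r.Body, r.ContentLength))
}

func main() {
	body := `{"Version":"2012-10-17","Statement":[]}`
	req, _ := http.NewRequest("PUT", "/bucket?policy", strings.NewReader(body))
	req.ContentLength = int64(len(body))
	data, err := readPolicyBody(req)
	fmt.Println(len(data), err)
}
```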
@@ -254,7 +254,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
		recV4 := httptest.NewRecorder()
		// construct HTTP request for PUT bucket policy endpoint.
		reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testCase.bucketName),
-			int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey)
+			int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey, nil)
		if err != nil {
			t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
		}
@@ -268,7 +268,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
		recV2 := httptest.NewRecorder()
		// construct HTTP request for PUT bucket policy endpoint.
		reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testCase.bucketName),
-			int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey)
+			int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey, nil)
		if err != nil {
			t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
		}
@@ -304,7 +304,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
	nilBucket := "dummy-bucket"

	nilReq, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", nilBucket),
-		0, nil, "", "")
+		0, nil, "", "", nil)

	if err != nil {
		t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@@ -346,7 +346,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
		recV4 := httptest.NewRecorder()
		// construct HTTP request for PUT bucket policy endpoint.
		reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testPolicy.bucketName),
-			int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey)
+			int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
		if err != nil {
			t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
		}
@@ -360,7 +360,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
		recV2 := httptest.NewRecorder()
		// construct HTTP request for PUT bucket policy endpoint.
		reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testPolicy.bucketName),
-			int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey)
+			int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
		if err != nil {
			t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
		}
@@ -417,7 +417,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
		recV4 := httptest.NewRecorder()
		// construct HTTP request for PUT bucket policy endpoint.
		reqV4, err := newTestSignedRequestV4("GET", getGetPolicyURL("", testCase.bucketName),
-			0, nil, testCase.accessKey, testCase.secretKey)
+			0, nil, testCase.accessKey, testCase.secretKey, nil)

		if err != nil {
			t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
@@ -456,7 +456,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
		recV2 := httptest.NewRecorder()
		// construct HTTP request for PUT bucket policy endpoint.
		reqV2, err := newTestSignedRequestV2("GET", getGetPolicyURL("", testCase.bucketName),
-			0, nil, testCase.accessKey, testCase.secretKey)
+			0, nil, testCase.accessKey, testCase.secretKey, nil)
		if err != nil {
			t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
		}
@@ -511,7 +511,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
	nilBucket := "dummy-bucket"

	nilReq, err := newTestSignedRequestV4("GET", getGetPolicyURL("", nilBucket),
-		0, nil, "", "")
+		0, nil, "", "", nil)

	if err != nil {
		t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@@ -591,7 +591,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
		recV4 := httptest.NewRecorder()
		// construct HTTP request for PUT bucket policy endpoint.
		reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testPolicy.bucketName),
-			int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey)
+			int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
		if err != nil {
			t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
		}
@@ -641,7 +641,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
		recV4 := httptest.NewRecorder()
		// construct HTTP request for Delete bucket policy endpoint.
		reqV4, err := newTestSignedRequestV4("DELETE", getDeletePolicyURL("", testCase.bucketName),
-			0, nil, testCase.accessKey, testCase.secretKey)
+			0, nil, testCase.accessKey, testCase.secretKey, nil)
		if err != nil {
			t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
		}
@@ -663,7 +663,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
		recV2 := httptest.NewRecorder()
		// construct HTTP request for PUT bucket policy endpoint.
		reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testPolicy.bucketName),
-			int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey)
+			int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
		if err != nil {
			t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
		}
@@ -680,7 +680,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
		recV2 := httptest.NewRecorder()
		// construct HTTP request for Delete bucket policy endpoint.
		reqV2, err := newTestSignedRequestV2("DELETE", getDeletePolicyURL("", testCase.bucketName),
-			0, nil, testCase.accessKey, testCase.secretKey)
+			0, nil, testCase.accessKey, testCase.secretKey, nil)
		if err != nil {
			t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
		}
@@ -714,7 +714,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
	nilBucket := "dummy-bucket"

	nilReq, err := newTestSignedRequestV4("DELETE", getDeletePolicyURL("", nilBucket),
-		0, nil, "", "")
+		0, nil, "", "", nil)

	if err != nil {
		t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
56 cmd/certs.go
@@ -68,24 +68,6 @@ func parsePublicCertFile(certFile string) (x509Certs []*x509.Certificate, err er
}

func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
-	// Get all CA file names.
-	var caFiles []string
-	fis, err := readDir(certsCAsDir)
-	if err != nil {
-		return nil, err
-	}
-	for _, fi := range fis {
-		// Skip all directories.
-		if hasSuffix(fi, slashSeparator) {
-			continue
-		}
-		// We are only interested in regular files here.
-		caFiles = append(caFiles, pathJoin(certsCAsDir, fi))
-	}
-	if len(caFiles) == 0 {
-		return nil, nil
-	}
-
	rootCAs, _ := x509.SystemCertPool()
	if rootCAs == nil {
		// In some systems (like Windows) system cert pool is
@@ -94,16 +76,26 @@ func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
		rootCAs = x509.NewCertPool()
	}

-	// Load custom root CAs for client requests
-	for _, caFile := range caFiles {
-		caCert, err := ioutil.ReadFile(caFile)
-		if err != nil {
-			return nil, err
+	fis, err := readDir(certsCAsDir)
+	if err != nil {
+		if err == errFileNotFound {
+			err = nil // Return success if CA's directory is missing.
		}
-
-		rootCAs.AppendCertsFromPEM(caCert)
+		return rootCAs, err
	}
+
+	// Load all custom CA files.
+	for _, fi := range fis {
+		// Skip all directories.
+		if hasSuffix(fi, slashSeparator) {
+			continue
+		}
+		caCert, err := ioutil.ReadFile(pathJoin(certsCAsDir, fi))
+		if err != nil {
+			return rootCAs, err
+		}
+		rootCAs.AppendCertsFromPEM(caCert)
+	}
	return rootCAs, nil
}

@@ -150,24 +142,20 @@ func loadX509KeyPair(certFile, keyFile string) (tls.Certificate, error) {
	return cert, nil
}

-func getSSLConfig() (x509Certs []*x509.Certificate, rootCAs *x509.CertPool, c *certs.Certs, secureConn bool, err error) {
+func getTLSConfig() (x509Certs []*x509.Certificate, c *certs.Certs, secureConn bool, err error) {
	if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
-		return nil, nil, nil, false, nil
+		return nil, nil, false, nil
	}

	if x509Certs, err = parsePublicCertFile(getPublicCertFile()); err != nil {
-		return nil, nil, nil, false, err
+		return nil, nil, false, err
	}

	c, err = certs.New(getPublicCertFile(), getPrivateKeyFile(), loadX509KeyPair)
	if err != nil {
-		return nil, nil, nil, false, err
-	}
-
-	if rootCAs, err = getRootCAs(getCADir()); err != nil {
-		return nil, nil, nil, false, err
+		return nil, nil, false, err
	}

	secureConn = true
-	return x509Certs, rootCAs, c, secureConn, nil
+	return x509Certs, c, secureConn, nil
}

@@ -223,7 +223,8 @@ func TestGetRootCAs(t *testing.T) {
		certCAsDir  string
		expectedErr error
	}{
-		{"nonexistent-dir", errFileNotFound},
+		// ignores non-existent directories.
+		{"nonexistent-dir", nil},
		// Ignores directories.
		{dir1, nil},
		// Ignore empty directory.
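The reworked getRootCAs changes failure semantics: a missing CA directory is no longer an error (matching the updated TestGetRootCAs expectation of nil for nonexistent-dir), the system pool is used as the base when available, and subdirectories are skipped. Below is a self-contained approximation using only the standard library; MinIO's version goes through its own readDir and errFileNotFound helpers rather than os.ReadDir.

```go
package main

import (
	"crypto/x509"
	"fmt"
	"os"
	"path/filepath"
)

// loadRootCAs sketches the reworked getRootCAs: start from the system pool,
// treat a missing CA directory as success, skip subdirectories, and append
// every readable PEM file to the pool.
func loadRootCAs(caDir string) (*x509.CertPool, error) {
	rootCAs, _ := x509.SystemCertPool()
	if rootCAs == nil {
		// On some systems (e.g. Windows) the system pool is unavailable.
		rootCAs = x509.NewCertPool()
	}
	entries, err := os.ReadDir(caDir)
	if err != nil {
		if os.IsNotExist(err) {
			return rootCAs, nil // a missing CA directory is not an error
		}
		return rootCAs, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		pem, err := os.ReadFile(filepath.Join(caDir, e.Name()))
		if err != nil {
			return rootCAs, err
		}
		rootCAs.AppendCertsFromPEM(pem)
	}
	return rootCAs, nil
}

func main() {
	pool, err := loadRootCAs("/nonexistent-cas-dir")
	fmt.Println(pool != nil, err) // true <nil>
}
```

Note that getTLSConfig (formerly getSSLConfig) no longer returns the root CA pool at all; loading the CAs becomes a separate concern from loading the server's own certificate.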
@@ -17,7 +17,7 @@
package cmd

import (
	"context"
	"crypto/tls"
	"errors"
	"net"
	"os"
@@ -27,13 +27,15 @@ import (
	"time"

	etcd "github.com/coreos/etcd/clientv3"
+	dns2 "github.com/miekg/dns"
	"github.com/minio/cli"
+	"github.com/minio/minio-go/pkg/set"
	"github.com/minio/minio/cmd/logger"
+	"github.com/minio/minio/cmd/logger/target/console"
+	"github.com/minio/minio/cmd/logger/target/http"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/dns"
-
-	"github.com/minio/minio-go/pkg/set"
	xnet "github.com/minio/minio/pkg/net"
)
// Check for updates and print a notification message
@@ -48,90 +50,136 @@ func checkUpdate(mode string) {
	}
}

// Initialize and load config from remote etcd or local config directory
func initConfig() {
	if globalEtcdClient != nil {
		ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
		resp, err := globalEtcdClient.Get(ctx, getConfigFile())
		cancel()
		// This means there are no entries in etcd with config file
		// So create a new config
		if err == nil && resp.Count == 0 {
			logger.FatalIf(newConfig(), "Unable to initialize minio config for the first time.")
			logger.Info("Created minio configuration file successfully at %v", globalEtcdClient.Endpoints())
		} else {
			// This means there is an entry in etcd, update it if required and proceed
			if err == nil && resp.Count > 0 {
				logger.FatalIf(migrateConfig(), "Config migration failed.")
				logger.FatalIf(loadConfig(), "Unable to load config version: '%s'.", serverConfigVersion)
			} else {
				logger.FatalIf(err, "Unable to load config version: '%s'.", serverConfigVersion)
			}
		}
		return
	}

	if isFile(getConfigFile()) {
		logger.FatalIf(migrateConfig(), "Config migration failed")
		logger.FatalIf(loadConfig(), "Unable to load the configuration file")
	} else {
		// Config file does not exist, we create it fresh and return upon success.
		logger.FatalIf(newConfig(), "Unable to initialize minio config for the first time")
		logger.Info("Created minio configuration file successfully at " + getConfigDir())
	}
}

// Load logger targets based on user's configuration
func loadLoggers() {
-	if globalServerConfig.Logger.Console.Enabled {
-		// Enable console logging
-		logger.AddTarget(logger.NewConsole())
+	auditEndpoint, ok := os.LookupEnv("MINIO_AUDIT_LOGGER_HTTP_ENDPOINT")
+	if ok {
+		// Enable audit HTTP logging through ENV.
+		logger.AddAuditTarget(http.New(auditEndpoint, NewCustomHTTPTransport()))
	}
-	for _, l := range globalServerConfig.Logger.HTTP {
-		if l.Enabled {
-			// Enable http logging
-			logger.AddTarget(logger.NewHTTP(l.Endpoint, NewCustomHTTPTransport()))
+
+	loggerEndpoint, ok := os.LookupEnv("MINIO_LOGGER_HTTP_ENDPOINT")
+	if ok {
+		// Enable HTTP logging through ENV.
+		logger.AddTarget(http.New(loggerEndpoint, NewCustomHTTPTransport()))
+	} else {
+		for _, l := range globalServerConfig.Logger.HTTP {
+			if l.Enabled {
+				// Enable http logging
+				logger.AddTarget(http.New(l.Endpoint, NewCustomHTTPTransport()))
+			}
		}
	}
+
+	if globalServerConfig.Logger.Console.Enabled {
+		// Enable console logging
+		logger.AddTarget(console.New())
+	}

}
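loadLoggers now prefers environment-driven targets: MINIO_AUDIT_LOGGER_HTTP_ENDPOINT always adds an audit target, MINIO_LOGGER_HTTP_ENDPOINT overrides the configured HTTP targets entirely (note the else branch above), and console logging is appended last. A small sketch of that override precedence, with plain strings standing in for MinIO's logger targets:

```go
package main

import (
	"fmt"
	"os"
)

// pickLoggerTargets mirrors the precedence in the new loadLoggers: an env
// endpoint wins outright; otherwise the configured endpoints are used.
func pickLoggerTargets(configured []string) []string {
	if endpoint, ok := os.LookupEnv("MINIO_LOGGER_HTTP_ENDPOINT"); ok {
		return []string{endpoint} // env overrides config entirely
	}
	return configured
}

func main() {
	os.Setenv("MINIO_LOGGER_HTTP_ENDPOINT", "http://log-collector:8080")
	fmt.Println(pickLoggerTargets([]string{"http://from-config:9000"}))
}
```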
+func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() string) (*ConfigDir, bool) {
+	var dir string
+	var dirSet bool
+
+	switch {
+	case ctx.IsSet(option):
+		dir = ctx.String(option)
+		dirSet = true
+	case ctx.GlobalIsSet(option):
+		dir = ctx.GlobalString(option)
+		dirSet = true
+		// cli package does not expose parent's option option. Below code is workaround.
+		if dir == "" || dir == getDefaultDir() {
+			dirSet = false // Unset to false since GlobalIsSet() true is a false positive.
+			if ctx.Parent().GlobalIsSet(option) {
+				dir = ctx.Parent().GlobalString(option)
+				dirSet = true
+			}
+		}
+	default:
+		// Neither local nor global option is provided. In this case, try to use
+		// default directory.
+		dir = getDefaultDir()
+		if dir == "" {
+			logger.FatalIf(errInvalidArgument, "%s option must be provided", option)
+		}
+	}
+
+	if dir == "" {
+		logger.FatalIf(errors.New("empty directory"), "%s directory cannot be empty", option)
+	}
+
+	// Disallow relative paths, figure out absolute paths.
+	dirAbs, err := filepath.Abs(dir)
+	logger.FatalIf(err, "Unable to fetch absolute path for %s=%s", option, dir)
+
+	logger.FatalIf(mkdirAllIgnorePerm(dirAbs), "Unable to create directory specified %s=%s", option, dir)
+
+	return &ConfigDir{path: dirAbs}, dirSet
+}
+
func handleCommonCmdArgs(ctx *cli.Context) {
-	var configDir string
-
-	if ctx.IsSet("config-dir") {
-		configDir = ctx.String("config-dir")
-	} else if ctx.GlobalIsSet("config-dir") {
-		configDir = ctx.GlobalString("config-dir")
-		// cli package does not expose parent's "config-dir" option. Below code is workaround.
-		if configDir == "" || configDir == getConfigDir() {
-			if ctx.Parent().GlobalIsSet("config-dir") {
-				configDir = ctx.Parent().GlobalString("config-dir")
-			}
-		}
-	} else {
-		// Neither local nor global config-dir option is provided. In this case, try to use
-		// default config directory.
-		configDir = getConfigDir()
-		if configDir == "" {
-			logger.FatalIf(errors.New("missing option"), "config-dir option must be provided")
-		}
+	// Get "json" flag from command line argument and
+	// enable json and quite modes if json flag is turned on.
+	globalCLIContext.JSON = ctx.IsSet("json") || ctx.GlobalIsSet("json")
+	if globalCLIContext.JSON {
+		logger.EnableJSON()
	}

-	if configDir == "" {
-		logger.FatalIf(errors.New("empty directory"), "Configuration directory cannot be empty")
+	// Get quiet flag from command line argument.
+	globalCLIContext.Quiet = ctx.IsSet("quiet") || ctx.GlobalIsSet("quiet")
+	if globalCLIContext.Quiet {
+		logger.EnableQuiet()
	}

-	// Disallow relative paths, figure out absolute paths.
-	configDirAbs, err := filepath.Abs(configDir)
-	logger.FatalIf(err, "Unable to fetch absolute path for config directory %s", configDir)
-	setConfigDir(configDirAbs)
+	// Get anonymous flag from command line argument.
+	globalCLIContext.Anonymous = ctx.IsSet("anonymous") || ctx.GlobalIsSet("anonymous")
+	if globalCLIContext.Anonymous {
+		logger.EnableAnonymous()
+	}
+
+	// Fetch address option
+	globalCLIContext.Addr = ctx.GlobalString("address")
+	if globalCLIContext.Addr == "" || globalCLIContext.Addr == ":"+globalMinioDefaultPort {
+		globalCLIContext.Addr = ctx.String("address")
+	}
+
+	// Set all config, certs and CAs directories.
+	var configSet, certsSet bool
+	globalConfigDir, configSet = newConfigDirFromCtx(ctx, "config-dir", defaultConfigDir.Get)
+	globalCertsDir, certsSet = newConfigDirFromCtx(ctx, "certs-dir", defaultCertsDir.Get)
+
+	// Remove this code when we deprecate and remove config-dir.
+	// This code is to make sure we inherit from the config-dir
+	// option if certs-dir is not provided.
+	if !certsSet && configSet {
+		globalCertsDir = &ConfigDir{path: filepath.Join(globalConfigDir.Get(), certsDir)}
+	}
+
+	globalCertsCADir = &ConfigDir{path: filepath.Join(globalCertsDir.Get(), certsCADir)}
+
+	logger.FatalIf(mkdirAllIgnorePerm(globalCertsCADir.Get()), "Unable to create certs CA directory at %s", globalCertsCADir.Get())
}
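newConfigDirFromCtx resolves a directory option in a fixed order: the subcommand flag, then the global flag (with a workaround because the cli package hides the parent command's value), then the compiled-in default. A pure-function sketch of that precedence chain, detached from the cli package:

```go
package main

import "fmt"

// resolveDir sketches newConfigDirFromCtx's precedence: local flag, then
// global flag (ignored when it merely echoes the default, which is the
// false-positive case the workaround handles), then the parent's global
// flag, then the default. The boolean reports whether the user set it.
func resolveDir(local, global, parentGlobal, defaultDir string) (string, bool) {
	switch {
	case local != "":
		return local, true
	case global != "" && global != defaultDir:
		return global, true
	case parentGlobal != "":
		return parentGlobal, true
	default:
		return defaultDir, false
	}
}

func main() {
	dir, set := resolveDir("", "", "/etc/minio-certs", "/home/user/.minio/certs")
	fmt.Println(dir, set) // /etc/minio-certs true
}
```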
// Parses the given compression exclude list `extensions` or `content-types`.
func parseCompressIncludes(includes []string) ([]string, error) {
	for _, e := range includes {
		if len(e) == 0 {
			return nil, uiErrInvalidCompressionIncludesValue(nil).Msg("extension/mime-type (%s) cannot be empty", e)
		}
	}
	return includes, nil
}

func handleCommonEnvVars() {
	compressEnvDelimiter := ","
	// Start profiler if env is set.
	if profiler := os.Getenv("_MINIO_PROFILER"); profiler != "" {
-		globalProfiler = startProfiler(profiler)
+		var err error
+		globalProfiler, err = startProfiler(profiler, "")
+		logger.FatalIf(err, "Unable to setup a profiler")
	}

	accessKey := os.Getenv("MINIO_ACCESS_KEY")
@@ -141,6 +189,7 @@ func handleCommonEnvVars() {
		if err != nil {
			logger.Fatal(uiErrInvalidCredentials(err), "Unable to validate credentials inherited from the shell environment")
		}
+		cred.Expiration = timeSentinel

		// credential Envs are set globally.
		globalIsEnvCreds = true
@@ -150,7 +199,7 @@ func handleCommonEnvVars() {
	if browser := os.Getenv("MINIO_BROWSER"); browser != "" {
		browserFlag, err := ParseBoolFlag(browser)
		if err != nil {
-			logger.Fatal(uiErrInvalidBrowserValue(nil).Msg("Unknown value `%s`", browser), "Unable to validate MINIO_BROWSER environment variable")
+			logger.Fatal(uiErrInvalidBrowserValue(nil).Msg("Unknown value `%s`", browser), "Invalid MINIO_BROWSER value in environment variable")
		}

		// browser Envs are set globally, this does not represent
||||
@@ -169,28 +218,82 @@ func handleCommonEnvVars() {
|
||||
etcdEndpointsEnv, ok := os.LookupEnv("MINIO_ETCD_ENDPOINTS")
|
||||
if ok {
|
||||
etcdEndpoints := strings.Split(etcdEndpointsEnv, ",")
|
||||
|
||||
var etcdSecure bool
|
||||
for _, endpoint := range etcdEndpoints {
|
||||
u, err := xnet.ParseURL(endpoint)
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to initialize etcd with %s", etcdEndpoints)
|
||||
}
|
||||
// If one of the endpoint is https, we will use https directly.
|
||||
etcdSecure = etcdSecure || u.Scheme == "https"
|
||||
}
|
||||
|
||||
var err error
|
||||
globalEtcdClient, err = etcd.New(etcd.Config{
|
||||
Endpoints: etcdEndpoints,
|
||||
DialTimeout: defaultDialTimeout,
|
||||
DialKeepAliveTime: defaultDialKeepAlive,
|
||||
})
|
||||
if etcdSecure {
|
||||
// This is only to support client side certificate authentication
|
||||
// https://coreos.com/etcd/docs/latest/op-guide/security.html
|
||||
etcdClientCertFile, ok1 := os.LookupEnv("MINIO_ETCD_CLIENT_CERT")
|
||||
etcdClientCertKey, ok2 := os.LookupEnv("MINIO_ETCD_CLIENT_CERT_KEY")
|
||||
var getClientCertificate func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
|
||||
if ok1 && ok2 {
|
||||
getClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) {
|
||||
cert, terr := tls.LoadX509KeyPair(etcdClientCertFile, etcdClientCertKey)
|
||||
return &cert, terr
|
||||
}
|
||||
}
|
||||
|
||||
globalEtcdClient, err = etcd.New(etcd.Config{
|
||||
Endpoints: etcdEndpoints,
|
||||
DialTimeout: defaultDialTimeout,
|
||||
DialKeepAliveTime: defaultDialKeepAlive,
|
||||
TLS: &tls.Config{
|
||||
RootCAs: globalRootCAs,
|
||||
GetClientCertificate: getClientCertificate,
|
||||
},
|
||||
})
|
||||
} else {
|
||||
globalEtcdClient, err = etcd.New(etcd.Config{
|
||||
Endpoints: etcdEndpoints,
|
||||
DialTimeout: defaultDialTimeout,
|
||||
DialKeepAliveTime: defaultDialKeepAlive,
|
||||
})
|
||||
}
|
||||
logger.FatalIf(err, "Unable to initialize etcd with %s", etcdEndpoints)
|
||||
}
|
||||
|
||||
globalDomainName, globalIsEnvDomainName = os.LookupEnv("MINIO_DOMAIN")
|
||||
if globalDomainName != "" {
|
||||
if _, ok = dns2.IsDomainName(globalDomainName); !ok {
|
||||
logger.Fatal(uiErrInvalidDomainValue(nil).Msg("Unknown value `%s`", globalDomainName), "Invalid MINIO_DOMAIN value in environment variable")
|
||||
}
|
||||
}
|
||||
|
||||
minioEndpointsEnv, ok := os.LookupEnv("MINIO_PUBLIC_IPS")
|
||||
if ok {
|
||||
minioEndpoints := strings.Split(minioEndpointsEnv, ",")
|
||||
globalDomainIPs = set.NewStringSet()
|
||||
for i, ip := range minioEndpoints {
|
||||
if net.ParseIP(ip) == nil {
|
||||
logger.FatalIf(errInvalidArgument, "Unable to initialize Minio server with invalid MINIO_PUBLIC_IPS[%d]: %s", i, ip)
|
||||
var domainIPs = set.NewStringSet()
|
||||
for _, endpoint := range minioEndpoints {
|
||||
if net.ParseIP(endpoint) == nil {
|
||||
// Checking if the IP is a DNS entry.
|
||||
addrs, err := net.LookupHost(endpoint)
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to initialize Minio server with [%s] invalid entry found in MINIO_PUBLIC_IPS", endpoint)
|
||||
}
|
||||
for _, addr := range addrs {
|
||||
domainIPs.Add(addr)
|
||||
}
|
||||
continue
|
||||
}
|
||||
globalDomainIPs.Add(ip)
|
||||
domainIPs.Add(endpoint)
|
||||
}
|
||||
updateDomainIPs(domainIPs)
|
||||
} else {
|
||||
// Add found interfaces IP address to global domain IPS,
|
||||
// loopback addresses will be naturally dropped.
|
||||
updateDomainIPs(localIP4)
|
||||
}
|
||||
|
||||
if globalDomainName != "" && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil {
|
||||
var err error
|
||||
globalDNSConfig, err = dns.NewCoreDNS(globalDomainName, globalDomainIPs, globalMinioPort, globalEtcdClient)
|
||||
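Two operational changes land above: etcd is dialed over TLS whenever any endpoint is https (optionally presenting a client certificate taken from MINIO_ETCD_CLIENT_CERT and MINIO_ETCD_CLIENT_CERT_KEY), and MINIO_PUBLIC_IPS entries may now be hostnames, which are resolved rather than rejected. The sketch isolates the second change using the standard resolver:

```go
package main

import (
	"fmt"
	"net"
)

// resolvePublicEndpoints mirrors the new MINIO_PUBLIC_IPS handling: literal
// IPs are taken as-is, anything else is treated as a hostname and resolved.
func resolvePublicEndpoints(endpoints []string) ([]string, error) {
	var ips []string
	for _, endpoint := range endpoints {
		if net.ParseIP(endpoint) != nil {
			ips = append(ips, endpoint)
			continue
		}
		addrs, err := net.LookupHost(endpoint)
		if err != nil {
			return nil, fmt.Errorf("invalid MINIO_PUBLIC_IPS entry %q: %v", endpoint, err)
		}
		ips = append(ips, addrs...)
	}
	return ips, nil
}

func main() {
	ips, err := resolvePublicEndpoints([]string{"127.0.0.1", "localhost"})
	fmt.Println(ips, err)
}
```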
@@ -271,7 +374,7 @@ func handleCommonEnvVars() {
	if worm := os.Getenv("MINIO_WORM"); worm != "" {
		wormFlag, err := ParseBoolFlag(worm)
		if err != nil {
-			logger.Fatal(uiErrInvalidWormValue(nil).Msg("Unknown value `%s`", worm), "Unable to validate MINIO_WORM environment variable")
+			logger.Fatal(uiErrInvalidWormValue(nil).Msg("Unknown value `%s`", worm), "Invalid MINIO_WORM value in environment variable")
		}

		// worm Envs are set globally, this does not represent
@@ -279,4 +382,28 @@ func handleCommonEnvVars() {
		globalIsEnvWORM = true
		globalWORMEnabled = bool(wormFlag)
	}
+
+	if compress := os.Getenv("MINIO_COMPRESS"); compress != "" {
+		globalIsCompressionEnabled = strings.EqualFold(compress, "true")
+	}
+
+	compressExtensions := os.Getenv("MINIO_COMPRESS_EXTENSIONS")
+	compressMimeTypes := os.Getenv("MINIO_COMPRESS_MIMETYPES")
+	if compressExtensions != "" || compressMimeTypes != "" {
+		globalIsEnvCompression = true
+		if compressExtensions != "" {
+			extensions, err := parseCompressIncludes(strings.Split(compressExtensions, compressEnvDelimiter))
+			if err != nil {
+				logger.Fatal(err, "Invalid MINIO_COMPRESS_EXTENSIONS value (`%s`)", extensions)
+			}
+			globalCompressExtensions = extensions
+		}
+		if compressMimeTypes != "" {
+			contenttypes, err := parseCompressIncludes(strings.Split(compressMimeTypes, compressEnvDelimiter))
+			if err != nil {
+				logger.Fatal(err, "Invalid MINIO_COMPRESS_MIMETYPES value (`%s`)", contenttypes)
+			}
+			globalCompressMimeTypes = contenttypes
+		}
+	}
}
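Compression is wired up purely through the environment: MINIO_COMPRESS toggles it, and the extension and MIME-type lists are comma-split, then checked for empty entries by parseCompressIncludes. A standalone version of that check, using fmt.Errorf in place of MinIO's uiErr wrapper:

```go
package main

import (
	"fmt"
	"strings"
)

// parseCompressIncludes reproduces the diff's validation in spirit: every
// comma-separated extension or mime-type must be non-empty.
func parseCompressIncludes(includes []string) ([]string, error) {
	for _, e := range includes {
		if len(e) == 0 {
			return nil, fmt.Errorf("extension/mime-type (%s) cannot be empty", e)
		}
	}
	return includes, nil
}

func main() {
	// e.g. MINIO_COMPRESS_EXTENSIONS=".txt,.log,.csv"
	extensions, err := parseCompressIncludes(strings.Split(".txt,.log,.csv", ","))
	fmt.Println(extensions, err)
}
```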
151 cmd/config-common.go (new file)
@@ -0,0 +1,151 @@
/*
 * Minio Cloud Storage, (C) 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"context"
	"errors"
	"fmt"

	etcd "github.com/coreos/etcd/clientv3"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/hash"
)

var errConfigNotFound = errors.New("config file not found")

func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]byte, error) {
	var buffer bytes.Buffer
	// Read entire content by setting size to -1
	if err := objAPI.GetObject(ctx, minioMetaBucket, configFile, 0, -1, &buffer, "", ObjectOptions{}); err != nil {
		// Treat object not found as config not found.
		if isErrObjectNotFound(err) {
			return nil, errConfigNotFound
		}

		logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
		logger.LogIf(ctx, err)
		return nil, err
	}

	// Return config not found on empty content.
	if buffer.Len() == 0 {
		return nil, errConfigNotFound
	}

	return buffer.Bytes(), nil
}

func deleteConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) error {
	_, err := client.Delete(ctx, configFile)
	return err
}

func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
	return objAPI.DeleteObject(ctx, minioMetaBucket, configFile)
}

func saveConfigEtcd(ctx context.Context, client *etcd.Client, configFile string, data []byte) error {
	timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
	defer cancel()
	_, err := client.Put(timeoutCtx, configFile, string(data))
	if err == context.DeadlineExceeded {
		return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
	} else if err != nil {
		return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
	}
	return nil
}

func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
	hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
	if err != nil {
		return err
	}

	_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader, nil, nil), ObjectOptions{})
	return err
}

func readConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) ([]byte, error) {
	timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
	defer cancel()
	resp, err := client.Get(timeoutCtx, configFile)
	if err != nil {
		if err == context.DeadlineExceeded {
			return nil, fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
		}
		return nil, fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
	}
	if resp.Count == 0 {
		return nil, errConfigNotFound
	}
	for _, ev := range resp.Kvs {
		if string(ev.Key) == configFile {
			return ev.Value, nil
		}
	}
	return nil, errConfigNotFound
}

// watchConfigEtcd - watches for changes on `configFile` on etcd and loads them.
func watchConfigEtcd(objAPI ObjectLayer, configFile string, loadCfgFn func(ObjectLayer) error) {
	ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
	defer cancel()
	for watchResp := range globalEtcdClient.Watch(ctx, configFile) {
		for _, event := range watchResp.Events {
			if event.IsModify() || event.IsCreate() {
				loadCfgFn(objAPI)
			}
		}
	}
}

func checkConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) error {
	timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
	defer cancel()
	resp, err := client.Get(timeoutCtx, configFile)
	if err != nil {
		if err == context.DeadlineExceeded {
			return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
		}
		return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
	}
	if resp.Count == 0 {
		return errConfigNotFound
	}
	return nil
}

func checkConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
	if globalEtcdClient != nil {
		return checkConfigEtcd(ctx, globalEtcdClient, configFile)
	}

	if _, err := objAPI.GetObjectInfo(ctx, minioMetaBucket, configFile, ObjectOptions{}); err != nil {
		// Treat object not found as config not found.
		if isErrObjectNotFound(err) {
			return errConfigNotFound
		}

		logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
		logger.LogIf(ctx, err)
		return err
	}
	return nil
}
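
Taken together, these helpers make `.minio.sys` behave like a small key/value store for JSON configs, with `errConfigNotFound` as the single sentinel for both missing and empty objects. A minimal round-trip sketch, assuming an already-initialized object layer (the `objLayer` variable and the `config/example.json` key are placeholders, not names used by this change):

// Hedged sketch: persist and re-read a blob through the new helpers.
func configRoundTrip(objLayer ObjectLayer) error {
	ctx := context.Background()
	payload := []byte(`{"version": "33"}`)

	// saveConfig writes under minioMetaBucket through a SHA256-verified reader.
	if err := saveConfig(ctx, objLayer, "config/example.json", payload); err != nil {
		return err
	}

	// readConfig maps object-not-found and zero-length content to errConfigNotFound.
	data, err := readConfig(ctx, objLayer, "config/example.json")
	if err != nil {
		return err
	}
	_ = data
	return nil
}
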
cmd/config-current.go
@@ -20,16 +20,19 @@ import (
	"context"
	"errors"
	"fmt"
	"os"
	"reflect"
	"sync"

	"github.com/miekg/dns"
	"github.com/minio/minio/cmd/crypto"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"

	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/event/target"
	"github.com/minio/minio/pkg/quick"
	"github.com/minio/minio/pkg/iam/policy"
	"github.com/minio/minio/pkg/iam/validator"
	xnet "github.com/minio/minio/pkg/net"
)

// Steps to move from version N to version N+1
@@ -41,9 +44,9 @@ import (
// 6. Make changes in config-current_test.go for any test change

// Config version
const serverConfigVersion = "27"
const serverConfigVersion = "33"

type serverConfig = serverConfigV27
type serverConfig = serverConfigV33

var (
	// globalServerConfig server config.
@@ -64,11 +67,21 @@ func (s *serverConfig) SetRegion(region string) {

// GetRegion get current region.
func (s *serverConfig) GetRegion() string {
	if globalIsEnvRegion {
		return globalServerRegion
	}
	if s == nil {
		return ""
	}
	return s.Region
}

// SetCredential sets new credential and returns the previous credential.
func (s *serverConfig) SetCredential(creds auth.Credentials) (prevCred auth.Credentials) {
	if creds.IsValid() && globalActiveCred.IsValid() {
		globalActiveCred = creds
	}

	// Save previous credential.
	prevCred = s.Credential

@@ -81,15 +94,12 @@ func (s *serverConfig) SetCredential(creds auth.Credentials) (prevCred auth.Cred

// GetCredentials get current credentials.
func (s *serverConfig) GetCredential() auth.Credentials {
	if globalActiveCred.IsValid() {
		return globalActiveCred
	}
	return s.Credential
}

// SetBrowser set if browser is enabled.
func (s *serverConfig) SetBrowser(b bool) {
	// Set the new value.
	s.Browser = BoolFlag(b)
}

// SetWorm set if worm is enabled.
func (s *serverConfig) SetWorm(b bool) {
	// Set the new value.
@@ -104,16 +114,23 @@ func (s *serverConfig) SetStorageClass(standardClass, rrsClass storageClass) {
// GetStorageClass reads storage class fields from current config.
// It returns the standard and reduced redundancy storage class struct
func (s *serverConfig) GetStorageClass() (storageClass, storageClass) {
	if globalIsStorageClass {
		return globalStandardStorageClass, globalRRStorageClass
	}
	if s == nil {
		return storageClass{}, storageClass{}
	}
	return s.StorageClass.Standard, s.StorageClass.RRS
}

// GetBrowser get current credentials.
func (s *serverConfig) GetBrowser() bool {
	return bool(s.Browser)
}

// GetWorm get current credentials.
func (s *serverConfig) GetWorm() bool {
	if globalIsEnvWORM {
		return globalWORMEnabled
	}
	if s == nil {
		return false
	}
	return bool(s.Worm)
}

@@ -127,10 +144,24 @@ func (s *serverConfig) SetCacheConfig(drives, exclude []string, expiry int, maxu

// GetCacheConfig gets the current cache config
func (s *serverConfig) GetCacheConfig() CacheConfig {
	if globalIsDiskCacheEnabled {
		return CacheConfig{
			Drives:  globalCacheDrives,
			Exclude: globalCacheExcludes,
			Expiry:  globalCacheExpiry,
			MaxUse:  globalCacheMaxUse,
		}
	}
	if s == nil {
		return CacheConfig{}
	}
	return s.Cache
}

func (s *serverConfig) Validate() error {
	if s == nil {
		return nil
	}
	if s.Version != serverConfigVersion {
		return fmt.Errorf("configuration version mismatch. Expected: ‘%s’, Got: ‘%s’", serverConfigVersion, s.Version)
	}
@@ -143,84 +174,232 @@ func (s *serverConfig) Validate() error {
	}

	// Region: nothing to validate
	// Browser, Worm, Cache and StorageClass values are already validated during json unmarshal

	if s.Domain != "" {
		if _, ok := dns.IsDomainName(s.Domain); !ok {
			return errors.New("invalid domain name")
		}
	}

	// Worm, Cache and StorageClass values are already validated during json unmarshal
	for _, v := range s.Notify.AMQP {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("amqp: %s", err.Error())
			return fmt.Errorf("amqp: %s", err)
		}
	}

	for _, v := range s.Notify.Elasticsearch {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("elasticsearch: %s", err.Error())
			return fmt.Errorf("elasticsearch: %s", err)
		}
	}

	for _, v := range s.Notify.Kafka {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("kafka: %s", err.Error())
			return fmt.Errorf("kafka: %s", err)
		}
	}

	for _, v := range s.Notify.MQTT {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("mqtt: %s", err.Error())
			return fmt.Errorf("mqtt: %s", err)
		}
	}

	for _, v := range s.Notify.MySQL {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("mysql: %s", err.Error())
			return fmt.Errorf("mysql: %s", err)
		}
	}

	for _, v := range s.Notify.NATS {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("nats: %s", err.Error())
			return fmt.Errorf("nats: %s", err)
		}
	}

	for _, v := range s.Notify.NSQ {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("nsq: %s", err)
		}
	}

	for _, v := range s.Notify.PostgreSQL {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("postgreSQL: %s", err.Error())
			return fmt.Errorf("postgreSQL: %s", err)
		}
	}

	for _, v := range s.Notify.Redis {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("redis: %s", err.Error())
			return fmt.Errorf("redis: %s", err)
		}
	}

	for _, v := range s.Notify.Webhook {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("webhook: %s", err.Error())
			return fmt.Errorf("webhook: %s", err)
		}
	}

	return nil
}
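
A small aside on the `err.Error()` to `err` replacements in `Validate` above: the change is behavior-preserving, because `fmt`'s `%s` verb already invokes `Error()` on an error value. A quick standalone check:

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("connection refused")
	// Both lines print "amqp: connection refused"; %s calls err.Error().
	fmt.Println(fmt.Errorf("amqp: %s", err.Error()))
	fmt.Println(fmt.Errorf("amqp: %s", err))
}

Passing the error value directly is the more idiomatic form, and it also degrades gracefully (prints a placeholder rather than panicking) if an error should ever be nil.
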
// Save config file to corresponding backend
func Save(configFile string, data interface{}) error {
	return quick.SaveConfig(data, configFile, globalEtcdClient)
// SetCompressionConfig sets the current compression config
func (s *serverConfig) SetCompressionConfig(extensions []string, mimeTypes []string) {
	s.Compression.Extensions = extensions
	s.Compression.MimeTypes = mimeTypes
	s.Compression.Enabled = globalIsCompressionEnabled
}

// Load config from backend
func Load(configFile string, data interface{}) (quick.Config, error) {
	return quick.LoadConfig(configFile, globalEtcdClient, data)
// GetCompressionConfig gets the current compression config
func (s *serverConfig) GetCompressionConfig() compressionConfig {
	return s.Compression
}

// GetVersion gets config version from backend
func GetVersion(configFile string) (string, error) {
	return quick.GetVersion(configFile, globalEtcdClient)
func (s *serverConfig) loadFromEnvs() {
	// If env is set override the credentials from config file.
	if globalIsEnvCreds {
		s.SetCredential(globalActiveCred)
	}

	if globalIsEnvWORM {
		s.SetWorm(globalWORMEnabled)
	}

	if globalIsEnvRegion {
		s.SetRegion(globalServerRegion)
	}

	if globalIsStorageClass {
		s.SetStorageClass(globalStandardStorageClass, globalRRStorageClass)
	}

	if globalIsDiskCacheEnabled {
		s.SetCacheConfig(globalCacheDrives, globalCacheExcludes, globalCacheExpiry, globalCacheMaxUse)
	}

	if err := Environment.LookupKMSConfig(s.KMS); err != nil {
		logger.FatalIf(err, "Unable to setup the KMS")
	}

	if globalIsEnvCompression {
		s.SetCompressionConfig(globalCompressExtensions, globalCompressMimeTypes)
	}

	if jwksURL, ok := os.LookupEnv("MINIO_IAM_JWKS_URL"); ok {
		if u, err := xnet.ParseURL(jwksURL); err == nil {
			s.OpenID.JWKS.URL = u
			logger.FatalIf(s.OpenID.JWKS.PopulatePublicKey(), "Unable to populate public key from JWKS URL")
		}
	}

	if opaURL, ok := os.LookupEnv("MINIO_IAM_OPA_URL"); ok {
		if u, err := xnet.ParseURL(opaURL); err == nil {
			s.Policy.OPA.URL = u
			s.Policy.OPA.AuthToken = os.Getenv("MINIO_IAM_OPA_AUTHTOKEN")
		}
	}
}
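
The precedence rule throughout `loadFromEnvs` is env-wins-over-file: every `globalIsEnv*` flag guards an override of the loaded config. A standalone sketch of the same pattern, using a made-up variable name (`MINIO_EXAMPLE_REGION` is hypothetical, not a real MinIO knob):

package main

import (
	"fmt"
	"os"
)

// effectiveRegion mirrors the env-over-file precedence of loadFromEnvs.
func effectiveRegion(fileValue string) string {
	if v, ok := os.LookupEnv("MINIO_EXAMPLE_REGION"); ok {
		return v // environment overrides the on-disk config
	}
	return fileValue
}

func main() {
	os.Setenv("MINIO_EXAMPLE_REGION", "us-west-1")
	fmt.Println(effectiveRegion("us-east-1")) // prints us-west-1
}
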
// TestNotificationTargets tries to establish connections to all notification
// targets when enabled. This is a good way to make sure all configurations
// set by the user can work.
func (s *serverConfig) TestNotificationTargets() error {
	for k, v := range s.Notify.AMQP {
		if !v.Enable {
			continue
		}
		t, err := target.NewAMQPTarget(k, v)
		if err != nil {
			return fmt.Errorf("amqp(%s): %s", k, err.Error())
		}
		t.Close()
	}

	for k, v := range s.Notify.Elasticsearch {
		if !v.Enable {
			continue
		}
		t, err := target.NewElasticsearchTarget(k, v)
		if err != nil {
			return fmt.Errorf("elasticsearch(%s): %s", k, err.Error())
		}
		t.Close()
	}

	for k, v := range s.Notify.Kafka {
		if !v.Enable {
			continue
		}
		t, err := target.NewKafkaTarget(k, v)
		if err != nil {
			return fmt.Errorf("kafka(%s): %s", k, err.Error())
		}
		t.Close()
	}

	for k, v := range s.Notify.MQTT {
		if !v.Enable {
			continue
		}
		t, err := target.NewMQTTTarget(k, v)
		if err != nil {
			return fmt.Errorf("mqtt(%s): %s", k, err.Error())
		}
		t.Close()
	}

	for k, v := range s.Notify.MySQL {
		if !v.Enable {
			continue
		}
		t, err := target.NewMySQLTarget(k, v)
		if err != nil {
			return fmt.Errorf("mysql(%s): %s", k, err.Error())
		}
		t.Close()
	}

	for k, v := range s.Notify.NATS {
		if !v.Enable {
			continue
		}
		t, err := target.NewNATSTarget(k, v)
		if err != nil {
			return fmt.Errorf("nats(%s): %s", k, err.Error())
		}
		t.Close()
	}

	for k, v := range s.Notify.NSQ {
		if !v.Enable {
			continue
		}
		t, err := target.NewNSQTarget(k, v)
		if err != nil {
			return fmt.Errorf("nsq(%s): %s", k, err.Error())
		}
		t.Close()
	}

	for k, v := range s.Notify.PostgreSQL {
		if !v.Enable {
			continue
		}
		t, err := target.NewPostgreSQLTarget(k, v)
		if err != nil {
			return fmt.Errorf("postgreSQL(%s): %s", k, err.Error())
		}
		t.Close()
	}

	for k, v := range s.Notify.Redis {
		if !v.Enable {
			continue
		}
		t, err := target.NewRedisTarget(k, v)
		if err != nil {
			return fmt.Errorf("redis(%s): %s", k, err.Error())
		}
		t.Close()
	}

	return nil
}
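
Every loop above follows the same probe shape: skip disabled entries, construct the target once, surface only constructor errors, and close immediately. A runnable toy version of that skeleton, with a fake target standing in for the real `target.NewXxxTarget` constructors:

package main

import "fmt"

// fakeTarget stands in for a real notification target; only the shape matters.
type fakeTarget struct{ id string }

func (t *fakeTarget) Close() { fmt.Println("closed", t.id) }

func newFakeTarget(id string) (*fakeTarget, error) {
	return &fakeTarget{id: id}, nil // a real constructor would dial here
}

func main() {
	configured := map[string]bool{"1": true, "2": false}
	for id, enable := range configured {
		if !enable {
			continue // skip disabled entries, exactly as the loops above do
		}
		t, err := newFakeTarget(id)
		if err != nil {
			fmt.Printf("target(%s): %s\n", id, err)
			continue
		}
		t.Close() // the connection is only probed, never kept
	}
}
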
// Returns the string describing a difference with the given
@@ -234,18 +413,18 @@ func (s *serverConfig) ConfigDiff(t *serverConfig) string {
		return "Credential configuration differs"
	case s.Region != t.Region:
		return "Region configuration differs"
	case s.Browser != t.Browser:
		return "Browser configuration differs"
	case s.Domain != t.Domain:
		return "Domain configuration differs"
	case s.StorageClass != t.StorageClass:
		return "StorageClass configuration differs"
	case !reflect.DeepEqual(s.Cache, t.Cache):
		return "Cache configuration differs"
	case !reflect.DeepEqual(s.Compression, t.Compression):
		return "Compression configuration differs"
	case !reflect.DeepEqual(s.Notify.AMQP, t.Notify.AMQP):
		return "AMQP Notification configuration differs"
	case !reflect.DeepEqual(s.Notify.NATS, t.Notify.NATS):
		return "NATS Notification configuration differs"
	case !reflect.DeepEqual(s.Notify.NSQ, t.Notify.NSQ):
		return "NSQ Notification configuration differs"
	case !reflect.DeepEqual(s.Notify.Elasticsearch, t.Notify.Elasticsearch):
		return "ElasticSearch Notification configuration differs"
	case !reflect.DeepEqual(s.Notify.Redis, t.Notify.Redis):
@@ -262,6 +441,8 @@ func (s *serverConfig) ConfigDiff(t *serverConfig) string {
		return "MQTT Notification configuration differs"
	case !reflect.DeepEqual(s.Logger, t.Logger):
		return "Logger configuration differs"
	case !reflect.DeepEqual(s.KMS, t.KMS):
		return "KMS configuration differs"
	case reflect.DeepEqual(s, t):
		return ""
	default:
@@ -279,7 +460,6 @@ func newServerConfig() *serverConfig {
		Version:    serverConfigVersion,
		Credential: cred,
		Region:     globalMinioDefaultRegion,
		Browser:    true,
		StorageClass: storageClassConfig{
			Standard: storageClass{},
			RRS:      storageClass{},
@@ -290,7 +470,13 @@ func newServerConfig() *serverConfig {
			Expiry: globalCacheExpiry,
			MaxUse: globalCacheMaxUse,
		},
		KMS:    crypto.KMSConfig{},
		Notify: notifier{},
		Compression: compressionConfig{
			Enabled:    false,
			Extensions: globalCompressExtensions,
			MimeTypes:  globalCompressMimeTypes,
		},
	}

	// Make sure to initialize notification configs.
@@ -304,6 +490,8 @@ func newServerConfig() *serverConfig {
	srvCfg.Notify.Redis["1"] = target.RedisArgs{}
	srvCfg.Notify.NATS = make(map[string]target.NATSArgs)
	srvCfg.Notify.NATS["1"] = target.NATSArgs{}
	srvCfg.Notify.NSQ = make(map[string]target.NSQArgs)
	srvCfg.Notify.NSQ["1"] = target.NSQArgs{}
	srvCfg.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs)
	srvCfg.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{}
	srvCfg.Notify.MySQL = make(map[string]target.MySQLArgs)
@@ -327,158 +515,116 @@ func newServerConfig() *serverConfig {
	return srvCfg
}

// newConfig - initialize a new server config, saves env parameters if
// found, otherwise use default parameters
func newConfig() error {
	// Initialize server config.
	srvCfg, err := newQuickConfig(newServerConfig())
	if err != nil {
		return err
	}

	// If env is set override the credentials from config file.
	if globalIsEnvCreds {
		srvCfg.SetCredential(globalActiveCred)
	}

	if globalIsEnvBrowser {
		srvCfg.SetBrowser(globalIsBrowserEnabled)
	}

	if globalIsEnvWORM {
		srvCfg.SetWorm(globalWORMEnabled)
	}

	if globalIsEnvRegion {
		srvCfg.SetRegion(globalServerRegion)
	}

	if globalIsEnvDomainName {
		srvCfg.Domain = globalDomainName
	}

	if globalIsStorageClass {
		srvCfg.SetStorageClass(globalStandardStorageClass, globalRRStorageClass)
	}

	if globalIsDiskCacheEnabled {
		srvCfg.SetCacheConfig(globalCacheDrives, globalCacheExcludes, globalCacheExpiry, globalCacheMaxUse)
	}

	// hold the mutex lock before a new config is assigned.
	// Save the new config globally.
	// unlock the mutex.
	globalServerConfigMu.Lock()
	globalServerConfig = srvCfg
	globalServerConfigMu.Unlock()

	// Save config into file.
	return Save(getConfigFile(), globalServerConfig)
}

// newQuickConfig - initialize a new server config, with an allocated
// quick.Config interface.
func newQuickConfig(srvCfg *serverConfig) (*serverConfig, error) {
	qcfg, err := quick.NewConfig(srvCfg, globalEtcdClient)
	if err != nil {
		return nil, err
	}

	srvCfg.Config = qcfg
	return srvCfg, nil
}

// getValidConfig - returns valid server configuration
func getValidConfig() (*serverConfig, error) {
	srvCfg := &serverConfig{
		Region:  globalMinioDefaultRegion,
		Browser: true,
	}

	var err error
	srvCfg, err = newQuickConfig(srvCfg)
	if err != nil {
		return nil, err
	}

	configFile := getConfigFile()
	if err = srvCfg.Load(configFile); err != nil {
		return nil, err
	}

	if err = srvCfg.Validate(); err != nil {
		return nil, err
	}

	return srvCfg, nil
}

// loadConfig - loads a new config from disk, overrides params from env
// if found and valid
func loadConfig() error {
	srvCfg, err := getValidConfig()
	if err != nil {
		return uiErrInvalidConfig(nil).Msg(err.Error())
	}

	// If env is set override the credentials from config file.
	if globalIsEnvCreds {
		srvCfg.SetCredential(globalActiveCred)
	}

	if globalIsEnvBrowser {
		srvCfg.SetBrowser(globalIsBrowserEnabled)
	}

	if globalIsEnvRegion {
		srvCfg.SetRegion(globalServerRegion)
	}

	if globalIsEnvDomainName {
		srvCfg.Domain = globalDomainName
	}

	if globalIsStorageClass {
		srvCfg.SetStorageClass(globalStandardStorageClass, globalRRStorageClass)
	}

	if globalIsDiskCacheEnabled {
		srvCfg.SetCacheConfig(globalCacheDrives, globalCacheExcludes, globalCacheExpiry, globalCacheMaxUse)
	}

	// hold the mutex lock before a new config is assigned.
	globalServerConfigMu.Lock()
	globalServerConfig = srvCfg
func (s *serverConfig) loadToCachedConfigs() {
	if !globalIsEnvCreds {
		globalActiveCred = globalServerConfig.GetCredential()
	}
	if !globalIsEnvBrowser {
		globalIsBrowserEnabled = globalServerConfig.GetBrowser()
		globalActiveCred = s.GetCredential()
	}
	if !globalIsEnvWORM {
		globalWORMEnabled = globalServerConfig.GetWorm()
		globalWORMEnabled = s.GetWorm()
	}
	if !globalIsEnvRegion {
		globalServerRegion = globalServerConfig.GetRegion()
	}
	if !globalIsEnvDomainName {
		globalDomainName = globalServerConfig.Domain
		globalServerRegion = s.GetRegion()
	}
	if !globalIsStorageClass {
		globalStandardStorageClass, globalRRStorageClass = globalServerConfig.GetStorageClass()
		globalStandardStorageClass, globalRRStorageClass = s.GetStorageClass()
	}
	if !globalIsDiskCacheEnabled {
		cacheConf := globalServerConfig.GetCacheConfig()
		cacheConf := s.GetCacheConfig()
		globalCacheDrives = cacheConf.Drives
		globalCacheExcludes = cacheConf.Exclude
		globalCacheExpiry = cacheConf.Expiry
		globalCacheMaxUse = cacheConf.MaxUse
	}
	if err := Environment.LookupKMSConfig(s.KMS); err != nil {
		logger.FatalIf(err, "Unable to setup the KMS")
	}

	if !globalIsCompressionEnabled {
		compressionConf := s.GetCompressionConfig()
		globalCompressExtensions = compressionConf.Extensions
		globalCompressMimeTypes = compressionConf.MimeTypes
		globalIsCompressionEnabled = compressionConf.Enabled
	}

	globalIAMValidators = getAuthValidators(s)

	if s.Policy.OPA.URL != nil && s.Policy.OPA.URL.String() != "" {
		globalPolicyOPA = iampolicy.NewOpa(iampolicy.OpaArgs{
			URL:         s.Policy.OPA.URL,
			AuthToken:   s.Policy.OPA.AuthToken,
			Transport:   NewCustomHTTPTransport(),
			CloseRespFn: xhttp.DrainBody,
		})
	}
}

// newSrvConfig - initialize a new server config, saves env parameters if
// found, otherwise use default parameters
func newSrvConfig(objAPI ObjectLayer) error {
	// Initialize server config.
	srvCfg := newServerConfig()

	// Override any values from ENVs.
	srvCfg.loadFromEnvs()

	// Load values to cached global values.
	srvCfg.loadToCachedConfigs()

	// hold the mutex lock before a new config is assigned.
	globalServerConfigMu.Lock()
	globalServerConfig = srvCfg
	globalServerConfigMu.Unlock()

	// Save config into file.
	return saveServerConfig(context.Background(), objAPI, globalServerConfig)
}

// getValidConfig - returns valid server configuration
func getValidConfig(objAPI ObjectLayer) (*serverConfig, error) {
	srvCfg, err := readServerConfig(context.Background(), objAPI)
	if err != nil {
		return nil, err
	}

	return srvCfg, srvCfg.Validate()
}

// loadConfig - loads a new config from disk, overrides params from env
// if found and valid
func loadConfig(objAPI ObjectLayer) error {
	srvCfg, err := getValidConfig(objAPI)
	if err != nil {
		return uiErrInvalidConfig(nil).Msg(err.Error())
	}

	// Override any values from ENVs.
	srvCfg.loadFromEnvs()

	// Load values to cached global values.
	srvCfg.loadToCachedConfigs()

	// hold the mutex lock before a new config is assigned.
	globalServerConfigMu.Lock()
	globalServerConfig = srvCfg
	globalServerConfigMu.Unlock()

	return nil
}

// getAuthValidators - returns ValidatorList which contains
// enabled providers in server config.
// A new authentication provider is added like below
// * Add a new provider in pkg/iam/validator package.
func getAuthValidators(config *serverConfig) *validator.Validators {
	validators := validator.NewValidators()

	if config.OpenID.JWKS.URL != nil {
		validators.Add(validator.NewJWT(config.OpenID.JWKS))
	}

	return validators
}
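
The validator registry is intentionally additive: construct the list once, then `Add` one entry per enabled provider. A hedged sketch of how a second provider would be wired in, where `NewLDAP` is purely hypothetical and does not exist in pkg/iam/validator in this tree:

// Sketch only: mirrors getAuthValidators, with the hypothetical provider
// left as a comment since only NewJWT exists at this point in the code.
func getAuthValidatorsSketch(config *serverConfig) *validator.Validators {
	validators := validator.NewValidators()
	if config.OpenID.JWKS.URL != nil {
		validators.Add(validator.NewJWT(config.OpenID.JWKS))
	}
	// A new provider would register the same way:
	// validators.Add(validator.NewLDAP(config.LDAP))
	return validators
}
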
// getNotificationTargets - returns TargetList which contains enabled targets in serverConfig.
// A new notification target is added like below
// * Add a new target in pkg/event/target package.
@@ -486,7 +632,9 @@ func loadConfig() error {
// * Handle the configuration in this function to create/add into TargetList.
func getNotificationTargets(config *serverConfig) *event.TargetList {
	targetList := event.NewTargetList()

	if config == nil {
		return targetList
	}
	for id, args := range config.Notify.AMQP {
		if args.Enable {
			newTarget, err := target.NewAMQPTarget(id, args)
@@ -533,6 +681,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {

	for id, args := range config.Notify.MQTT {
		if args.Enable {
			args.RootCAs = globalRootCAs
			newTarget, err := target.NewMQTTTarget(id, args)
			if err != nil {
				logger.LogIf(context.Background(), err)
@@ -573,6 +722,20 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
		}
	}

	for id, args := range config.Notify.NSQ {
		if args.Enable {
			newTarget, err := target.NewNSQTarget(id, args)
			if err != nil {
				logger.LogIf(context.Background(), err)
				continue
			}
			if err = targetList.Add(newTarget); err != nil {
				logger.LogIf(context.Background(), err)
				continue
			}
		}
	}

	for id, args := range config.Notify.PostgreSQL {
		if args.Enable {
			newTarget, err := target.NewPostgreSQLTarget(id, args)
@@ -603,6 +766,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {

	for id, args := range config.Notify.Webhook {
		if args.Enable {
			args.RootCAs = globalRootCAs
			newTarget := target.NewWebhookTarget(id, args)
			if err := targetList.Add(newTarget); err != nil {
				logger.LogIf(context.Background(), err)

cmd/config-current_test.go
@@ -17,9 +17,9 @@
package cmd

import (
	"io/ioutil"
	"context"
	"os"
	"path/filepath"
	"path"
	"testing"

	"github.com/minio/minio/pkg/auth"
@@ -27,12 +27,15 @@ import (
)

func TestServerConfig(t *testing.T) {
	rootPath, err := newTestConfig(globalMinioDefaultRegion)
	objLayer, fsDir, err := prepareFS()
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(fsDir)

	if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
		t.Fatalf("Init Test config failed")
	}
	// remove the root directory after the test ends.
	defer os.RemoveAll(rootPath)

	if globalServerConfig.GetRegion() != globalMinioDefaultRegion {
		t.Errorf("Expecting region `us-east-1` found %s", globalServerConfig.GetRegion())
@@ -49,16 +52,12 @@ func TestServerConfig(t *testing.T) {
		t.Errorf("Expecting version %s found %s", globalServerConfig.GetVersion(), serverConfigVersion)
	}

	// Attempt to save.
	if err := globalServerConfig.Save(getConfigFile()); err != nil {
	if err := saveServerConfig(context.Background(), objLayer, globalServerConfig); err != nil {
		t.Fatalf("Unable to save updated config file %s", err)
	}

	// Do this only once here.
	setConfigDir(rootPath)

	// Initialize server config.
	if err := loadConfig(); err != nil {
	if err := loadConfig(objLayer); err != nil {
		t.Fatalf("Unable to initialize from updated config file %s", err)
	}
}
@@ -68,6 +67,9 @@ func TestServerConfigWithEnvs(t *testing.T) {
	os.Setenv("MINIO_BROWSER", "off")
	defer os.Unsetenv("MINIO_BROWSER")

	os.Setenv("MINIO_WORM", "on")
	defer os.Unsetenv("MINIO_WORM")

	os.Setenv("MINIO_ACCESS_KEY", "minio")
	defer os.Unsetenv("MINIO_ACCESS_KEY")

@@ -82,61 +84,70 @@ func TestServerConfigWithEnvs(t *testing.T) {

	defer resetGlobalIsEnvs()

	// Get test root.
	rootPath, err := getTestRoot()
	objLayer, fsDir, err := prepareFS()
	if err != nil {
		t.Error(err)
		t.Fatal(err)
	}
	defer os.RemoveAll(fsDir)

	if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
		t.Fatalf("Init Test config failed")
	}

	globalObjLayerMutex.Lock()
	globalObjectAPI = objLayer
	globalObjLayerMutex.Unlock()

	serverHandleEnvVars()

	// Do this only once here.
	setConfigDir(rootPath)

	// Init config
	initConfig()
	initConfig(objLayer)

	// remove the root directory after the test ends.
	defer os.RemoveAll(rootPath)

	// Check if serverConfig has
	if globalServerConfig.GetBrowser() {
		t.Errorf("Expecting browser is set to false found %v", globalServerConfig.GetBrowser())
	// Check if serverConfig has browser disabled
	if globalIsBrowserEnabled {
		t.Error("Expected browser to be disabled but it is not")
	}

	// Check if serverConfig has
	// Check if serverConfig returns WORM config from the env
	if !globalServerConfig.GetWorm() {
		t.Error("Expected WORM to be enabled but it is not")
	}

	// Check if serverConfig has region from the environment
	if globalServerConfig.GetRegion() != "us-west-1" {
		t.Errorf("Expecting region to be \"us-west-1\" found %v", globalServerConfig.GetRegion())
		t.Errorf("Expected region to be \"us-west-1\", found %v", globalServerConfig.GetRegion())
	}

	// Check if serverConfig has
	// Check if serverConfig has credentials from the environment
	cred := globalServerConfig.GetCredential()

	if cred.AccessKey != "minio" {
		t.Errorf("Expecting access key to be `minio` found %s", cred.AccessKey)
		t.Errorf("Expected access key to be `minio`, found %s", cred.AccessKey)
	}

	if cred.SecretKey != "minio123" {
		t.Errorf("Expecting access key to be `minio123` found %s", cred.SecretKey)
		t.Errorf("Expected access key to be `minio123`, found %s", cred.SecretKey)
	}

	if globalServerConfig.Domain != "domain.com" {
		t.Errorf("Expecting Domain to be `domain.com` found " + globalServerConfig.Domain)
	// Check if serverConfig has the correct domain
	if globalDomainName != "domain.com" {
		t.Errorf("Expected Domain to be `domain.com`, found " + globalDomainName)
	}
}

// Tests config validator.
func TestValidateConfig(t *testing.T) {
	rootPath, err := newTestConfig(globalMinioDefaultRegion)
	objLayer, fsDir, err := prepareFS()
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(fsDir)

	if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
		t.Fatalf("Init Test config failed")
	}

	// remove the root directory after the test ends.
	defer os.RemoveAll(rootPath)

	configPath := filepath.Join(rootPath, minioConfigFile)

	configPath := path.Join(minioConfigPrefix, minioConfigFile)
	v := serverConfigVersion

	testCases := []struct {
@@ -177,7 +188,7 @@ func TestValidateConfig(t *testing.T) {
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, false},

		// Test 12 - Test NATS
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "clientID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},

		// Test 13 - Test ElasticSearch
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, false},
@@ -222,18 +233,21 @@ func TestValidateConfig(t *testing.T) {
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "namespace", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true},

		// Test 27 - Test MQTT
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "clientId": "", "username": "", "password": ""}}}}`, false},
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "username": "", "password": "", "queueDir": ""}}}}`, false},

		// Test 28 - Test NSQ
		{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nsq": { "1": { "enable": true, "nsqdAddress": "", "topic": ""} }}}`, false},
	}

	for i, testCase := range testCases {
		if werr := ioutil.WriteFile(configPath, []byte(testCase.configData), 0700); werr != nil {
			t.Fatal(werr)
		if err = saveConfig(context.Background(), objLayer, configPath, []byte(testCase.configData)); err != nil {
			t.Fatal(err)
		}
		_, verr := getValidConfig()
		if testCase.shouldPass && verr != nil {
			t.Errorf("Test %d, should pass but it failed with err = %v", i+1, verr)
		_, err = getValidConfig(objLayer)
		if testCase.shouldPass && err != nil {
			t.Errorf("Test %d, should pass but it failed with err = %v", i+1, err)
		}
		if !testCase.shouldPass && verr == nil {
		if !testCase.shouldPass && err == nil {
			t.Errorf("Test %d, should fail but it succeeded.", i+1)
		}
	}
@@ -249,77 +263,87 @@ func TestConfigDiff(t *testing.T) {
		{&serverConfig{}, nil, "Given configuration is empty"},
		// 2
		{
			&serverConfig{Credential: auth.Credentials{"u1", "p1"}},
			&serverConfig{Credential: auth.Credentials{"u1", "p2"}},
			&serverConfig{Credential: auth.Credentials{
				AccessKey:  "u1",
				SecretKey:  "p1",
				Expiration: timeSentinel,
			}},
			&serverConfig{Credential: auth.Credentials{
				AccessKey:  "u1",
				SecretKey:  "p2",
				Expiration: timeSentinel,
			}},
			"Credential configuration differs",
		},
		// 3
		{&serverConfig{Region: "us-east-1"}, &serverConfig{Region: "us-west-1"}, "Region configuration differs"},
		// 4
		{&serverConfig{Browser: false}, &serverConfig{Browser: true}, "Browser configuration differs"},
		// 5
		{&serverConfig{Domain: "domain1"}, &serverConfig{Domain: "domain2"}, "Domain configuration differs"},
		// 6
		{
			&serverConfig{StorageClass: storageClassConfig{storageClass{"1", 8}, storageClass{"2", 6}}},
			&serverConfig{StorageClass: storageClassConfig{storageClass{"1", 8}, storageClass{"2", 4}}},
			"StorageClass configuration differs",
		},
		// 7
		// 5
		{
			&serverConfig{Notify: notifier{AMQP: map[string]target.AMQPArgs{"1": {Enable: true}}}},
			&serverConfig{Notify: notifier{AMQP: map[string]target.AMQPArgs{"1": {Enable: false}}}},
			"AMQP Notification configuration differs",
		},
		// 8
		// 6
		{
			&serverConfig{Notify: notifier{NATS: map[string]target.NATSArgs{"1": {Enable: true}}}},
			&serverConfig{Notify: notifier{NATS: map[string]target.NATSArgs{"1": {Enable: false}}}},
			"NATS Notification configuration differs",
		},
		// 9
		// 7
		{
			&serverConfig{Notify: notifier{NSQ: map[string]target.NSQArgs{"1": {Enable: true}}}},
			&serverConfig{Notify: notifier{NSQ: map[string]target.NSQArgs{"1": {Enable: false}}}},
			"NSQ Notification configuration differs",
		},
		// 8
		{
			&serverConfig{Notify: notifier{Elasticsearch: map[string]target.ElasticsearchArgs{"1": {Enable: true}}}},
			&serverConfig{Notify: notifier{Elasticsearch: map[string]target.ElasticsearchArgs{"1": {Enable: false}}}},
			"ElasticSearch Notification configuration differs",
		},
		// 10
		// 9
		{
			&serverConfig{Notify: notifier{Redis: map[string]target.RedisArgs{"1": {Enable: true}}}},
			&serverConfig{Notify: notifier{Redis: map[string]target.RedisArgs{"1": {Enable: false}}}},
			"Redis Notification configuration differs",
		},
		// 11
		// 10
		{
			&serverConfig{Notify: notifier{PostgreSQL: map[string]target.PostgreSQLArgs{"1": {Enable: true}}}},
			&serverConfig{Notify: notifier{PostgreSQL: map[string]target.PostgreSQLArgs{"1": {Enable: false}}}},
			"PostgreSQL Notification configuration differs",
		},
		// 12
		// 11
		{
			&serverConfig{Notify: notifier{Kafka: map[string]target.KafkaArgs{"1": {Enable: true}}}},
			&serverConfig{Notify: notifier{Kafka: map[string]target.KafkaArgs{"1": {Enable: false}}}},
			"Kafka Notification configuration differs",
		},
		// 13
		// 12
		{
			&serverConfig{Notify: notifier{Webhook: map[string]target.WebhookArgs{"1": {Enable: true}}}},
			&serverConfig{Notify: notifier{Webhook: map[string]target.WebhookArgs{"1": {Enable: false}}}},
			"Webhook Notification configuration differs",
		},
		// 14
		// 13
		{
			&serverConfig{Notify: notifier{MySQL: map[string]target.MySQLArgs{"1": {Enable: true}}}},
			&serverConfig{Notify: notifier{MySQL: map[string]target.MySQLArgs{"1": {Enable: false}}}},
			"MySQL Notification configuration differs",
		},
		// 15
		// 14
		{
			&serverConfig{Notify: notifier{MQTT: map[string]target.MQTTArgs{"1": {Enable: true}}}},
			&serverConfig{Notify: notifier{MQTT: map[string]target.MQTTArgs{"1": {Enable: false}}}},
			"MQTT Notification configuration differs",
		},
		// 16
		// 15
		{
			&serverConfig{Logger: loggerConfig{
				Console: loggerConsole{Enabled: true},

cmd/config-dir.go
@@ -19,7 +19,6 @@ package cmd
import (
	"os"
	"path/filepath"
	"sync"

	homedir "github.com/mitchellh/go-homedir"
)
@@ -28,9 +27,6 @@ const (
	// Default minio configuration directory where below configuration files/directories are stored.
	defaultMinioConfigDir = ".minio"

	// Minio configuration file.
	minioConfigFile = "config.json"

	// Directory contains below files/directories for HTTPS configuration.
	certsDir = "certs"

@@ -44,55 +40,9 @@ const (
	privateKeyFile = "private.key"
)

// ConfigDir - configuration directory with locking.
// ConfigDir - points to a user set directory.
type ConfigDir struct {
	sync.Mutex
	dir string
}

// Set - saves given directory as configuration directory.
func (config *ConfigDir) Set(dir string) {
	config.Lock()
	defer config.Unlock()

	config.dir = dir
}

// Get - returns current configuration directory.
func (config *ConfigDir) Get() string {
	config.Lock()
	defer config.Unlock()

	return config.dir
}

func (config *ConfigDir) getCertsDir() string {
	return filepath.Join(config.Get(), certsDir)
}

// GetCADir - returns certificate CA directory.
func (config *ConfigDir) GetCADir() string {
	return filepath.Join(config.getCertsDir(), certsCADir)
}

// Create - creates configuration directory tree.
func (config *ConfigDir) Create() error {
	return os.MkdirAll(config.GetCADir(), 0700)
}

// GetMinioConfigFile - returns absolute path of config.json file.
func (config *ConfigDir) GetMinioConfigFile() string {
	return filepath.Join(config.Get(), minioConfigFile)
}

// GetPublicCertFile - returns absolute path of public.crt file.
func (config *ConfigDir) GetPublicCertFile() string {
	return filepath.Join(config.getCertsDir(), publicCertFile)
}

// GetPrivateKeyFile - returns absolute path of private.key file.
func (config *ConfigDir) GetPrivateKeyFile() string {
	return filepath.Join(config.getCertsDir(), privateKeyFile)
	path string
}

func getDefaultConfigDir() string {
@@ -104,32 +54,54 @@ func getDefaultConfigDir() string {
	return filepath.Join(homeDir, defaultMinioConfigDir)
}

var configDir = &ConfigDir{dir: getDefaultConfigDir()}

func setConfigDir(dir string) {
	configDir.Set(dir)
func getDefaultCertsDir() string {
	return filepath.Join(getDefaultConfigDir(), certsDir)
}

func getConfigDir() string {
	return configDir.Get()
func getDefaultCertsCADir() string {
	return filepath.Join(getDefaultCertsDir(), certsCADir)
}

func getCADir() string {
	return configDir.GetCADir()
var (
	// Default config, certs and CA directories.
	defaultConfigDir  = &ConfigDir{path: getDefaultConfigDir()}
	defaultCertsDir   = &ConfigDir{path: getDefaultCertsDir()}
	defaultCertsCADir = &ConfigDir{path: getDefaultCertsCADir()}

	// Points to current configuration directory -- deprecated, to be removed in future.
	globalConfigDir = defaultConfigDir
	// Points to current certs directory set by user with --certs-dir
	globalCertsDir = defaultCertsDir
	// Points to relative path to certs directory and is <value-of-certs-dir>/CAs
	globalCertsCADir = defaultCertsCADir
)

// Get - returns current directory.
func (dir *ConfigDir) Get() string {
	return dir.path
}

func createConfigDir() error {
	return configDir.Create()
// Attempts to create all directories, ignores any permission denied errors.
func mkdirAllIgnorePerm(path string) error {
	err := os.MkdirAll(path, 0700)
	if err != nil {
		// It is possible in kubernetes like deployments this directory
		// is already mounted and is not writable, ignore any write errors.
		if os.IsPermission(err) {
			err = nil
		}
	}
	return err
}
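
The `os.IsPermission` escape hatch above exists because in Kubernetes-style deployments the certs directory may already be mounted read-only. A runnable check of the same pattern, under the assumption that the process is not running as root (the `/example-certs` path is invented for the demo):

package main

import (
	"fmt"
	"os"
)

// Same shape as mkdirAllIgnorePerm: treat a permission error from MkdirAll
// as success, since the directory may be a pre-mounted read-only volume.
func mkdirAllIgnorePerm(path string) error {
	err := os.MkdirAll(path, 0700)
	if err != nil && os.IsPermission(err) {
		err = nil
	}
	return err
}

func main() {
	// Creating a child of "/" typically fails with EACCES for a normal
	// user; the error is swallowed and <nil> is printed.
	fmt.Println(mkdirAllIgnorePerm("/example-certs"))
}
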
|
||||
|
||||
func getConfigFile() string {
|
||||
return configDir.GetMinioConfigFile()
|
||||
return filepath.Join(globalConfigDir.Get(), minioConfigFile)
|
||||
}
|
||||
|
||||
func getPublicCertFile() string {
|
||||
return configDir.GetPublicCertFile()
|
||||
return filepath.Join(globalCertsDir.Get(), publicCertFile)
|
||||
}
|
||||
|
||||
func getPrivateKeyFile() string {
|
||||
return configDir.GetPrivateKeyFile()
|
||||
return filepath.Join(globalCertsDir.Get(), privateKeyFile)
|
||||
}
|
||||
|
||||
@@ -17,15 +17,20 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/dns"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/event/target"
|
||||
"github.com/minio/minio/pkg/iam/policy"
|
||||
"github.com/minio/minio/pkg/iam/validator"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
"github.com/minio/minio/pkg/quick"
|
||||
)
|
||||
@@ -33,7 +38,22 @@ import (
|
||||
// DO NOT EDIT following message template, please open a github issue to discuss instead.
|
||||
var configMigrateMSGTemplate = "Configuration file %s migrated from version '%s' to '%s' successfully."
|
||||
|
||||
// Migrates all config versions from "1" to "18".
|
||||
// Save config file to corresponding backend
|
||||
func Save(configFile string, data interface{}) error {
|
||||
return quick.SaveConfig(data, configFile, globalEtcdClient)
|
||||
}
|
||||
|
||||
// Load config from backend
|
||||
func Load(configFile string, data interface{}) (quick.Config, error) {
|
||||
return quick.LoadConfig(configFile, globalEtcdClient, data)
|
||||
}
|
||||
|
||||
// GetVersion gets config version from backend
|
||||
func GetVersion(configFile string) (string, error) {
|
||||
return quick.GetVersion(configFile, globalEtcdClient)
|
||||
}
|
||||
|
||||
// Migrates all config versions from "1" to "28".
|
||||
func migrateConfig() error {
|
||||
// Purge all configs with version '1',
|
||||
// this is a special case since version '1' used
|
||||
@@ -45,6 +65,9 @@ func migrateConfig() error {
|
||||
// Load only config version information.
|
||||
version, err := GetVersion(getConfigFile())
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -193,6 +216,11 @@ func migrateConfig() error {
|
||||
return err
|
||||
}
|
||||
fallthrough
|
||||
case "27":
|
||||
if err = migrateV27ToV28(); err != nil {
|
||||
return err
|
||||
}
|
||||
fallthrough
|
||||
case serverConfigVersion:
|
||||
// No migration needed. this always points to current version.
|
||||
err = nil
|
||||
@@ -202,11 +230,11 @@ func migrateConfig() error {
|
||||
|
||||
// Version '1' is not supported anymore and deprecated, safe to delete.
|
||||
func purgeV1() error {
|
||||
configFile := filepath.Join(getConfigDir(), "fsUsers.json")
|
||||
configFile := filepath.Join(globalConfigDir.Get(), "fsUsers.json")
|
||||
|
||||
cv1 := &configV1{}
|
||||
_, err := Load(configFile, cv1)
|
||||
if os.IsNotExist(err) || err == dns.ErrNoEntriesFound {
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("Unable to load config version ‘1’. %v", err)
|
||||
@@ -890,7 +918,7 @@ func migrateV12ToV13() error {
|
||||
// Copy over fields from V12 into V13 config struct
|
||||
srvConfig := &serverConfigV13{
|
||||
Logger: &loggerV7{},
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "13"
|
||||
srvConfig.Credential = cv12.Credential
|
||||
@@ -970,7 +998,7 @@ func migrateV13ToV14() error {
|
||||
// Copy over fields from V13 into V14 config struct
|
||||
srvConfig := &serverConfigV14{
|
||||
Logger: &loggerV7{},
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "14"
|
||||
srvConfig.Credential = cv13.Credential
|
||||
@@ -1055,7 +1083,7 @@ func migrateV14ToV15() error {
|
||||
// Copy over fields from V14 into V15 config struct
|
||||
srvConfig := &serverConfigV15{
|
||||
Logger: &loggerV7{},
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "15"
|
||||
srvConfig.Credential = cv14.Credential
|
||||
@@ -1145,7 +1173,7 @@ func migrateV15ToV16() error {
|
||||
// Copy over fields from V15 into V16 config struct
|
||||
srvConfig := &serverConfigV16{
|
||||
Logger: &loggers{},
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "16"
|
||||
srvConfig.Credential = cv15.Credential
|
||||
@@ -1235,7 +1263,7 @@ func migrateV16ToV17() error {
|
||||
// Copy over fields from V16 into V17 config struct
|
||||
srvConfig := &serverConfigV17{
|
||||
Logger: &loggers{},
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "17"
|
||||
srvConfig.Credential = cv16.Credential
|
||||
@@ -1356,7 +1384,7 @@ func migrateV17ToV18() error {
|
||||
// Copy over fields from V17 into V18 config struct
|
||||
srvConfig := &serverConfigV17{
|
||||
Logger: &loggers{},
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "18"
|
||||
srvConfig.Credential = cv17.Credential
|
||||
@@ -1458,7 +1486,7 @@ func migrateV18ToV19() error {
|
||||
// Copy over fields from V18 into V19 config struct
|
||||
srvConfig := &serverConfigV18{
|
||||
Logger: &loggers{},
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "19"
|
||||
srvConfig.Credential = cv18.Credential
|
||||
@@ -1564,7 +1592,7 @@ func migrateV19ToV20() error {
|
||||
// Copy over fields from V19 into V20 config struct
|
||||
srvConfig := &serverConfigV20{
|
||||
Logger: &loggers{},
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "20"
|
||||
srvConfig.Credential = cv19.Credential
|
||||
@@ -1668,7 +1696,7 @@ func migrateV20ToV21() error {
|
||||
|
||||
// Copy over fields from V20 into V21 config struct
|
||||
srvConfig := &serverConfigV21{
|
||||
Notify: ¬ifier{},
|
||||
Notify: ¬ifierV3{},
|
||||
}
|
||||
srvConfig.Version = "21"
|
||||
srvConfig.Credential = cv20.Credential
|
||||
@@ -1772,7 +1800,7 @@ func migrateV21ToV22() error {
|
||||
|
||||
// Copy over fields from V21 into V22 config struct
|
||||
srvConfig := &serverConfigV22{
|
||||
Notify: notifier{},
|
||||
Notify: notifierV3{},
|
||||
}
|
||||
srvConfig.Version = "22"
|
||||
srvConfig.Credential = cv21.Credential
|
||||
@@ -1876,7 +1904,7 @@ func migrateV22ToV23() error {
|
||||
|
||||
// Copy over fields from V22 into V23 config struct
|
||||
srvConfig := &serverConfigV23{
|
||||
Notify: notifier{},
|
||||
Notify: notifierV3{},
|
||||
}
|
||||
srvConfig.Version = "23"
|
||||
srvConfig.Credential = cv22.Credential
|
||||
@@ -1989,7 +2017,7 @@ func migrateV23ToV24() error {
|
||||
|
||||
// Copy over fields from V23 into V24 config struct
|
||||
srvConfig := &serverConfigV24{
|
||||
Notify: notifier{},
|
||||
Notify: notifierV3{},
|
||||
}
|
||||
srvConfig.Version = "24"
|
||||
srvConfig.Credential = cv23.Credential
|
||||
@@ -2102,7 +2130,7 @@ func migrateV24ToV25() error {
|
||||
|
||||
// Copy over fields from V24 into V25 config struct
|
||||
srvConfig := &serverConfigV25{
|
||||
Notify: notifier{},
|
||||
Notify: notifierV3{},
|
||||
}
|
||||
srvConfig.Version = "25"
|
||||
srvConfig.Credential = cv24.Credential
|
||||
@@ -2220,7 +2248,7 @@ func migrateV25ToV26() error {
|
||||
|
||||
// Copy over fields from V25 into V26 config struct
|
||||
srvConfig := &serverConfigV26{
|
||||
Notify: notifier{},
|
||||
Notify: notifierV3{},
|
||||
}
|
||||
srvConfig.Version = "26"
|
||||
srvConfig.Credential = cv25.Credential
|
||||
@@ -2355,3 +2383,353 @@ func migrateV26ToV27() error {
|
||||
logger.Info(configMigrateMSGTemplate, configFile, "26", "27")
|
||||
return nil
|
||||
}
|
||||
|
||||
func migrateV27ToV28() error {
|
||||
configFile := getConfigFile()
|
||||
|
||||
// config V28 is backward compatible with V27, load the old
|
||||
// config file in serverConfigV28 struct and initialize KMSConfig
|
||||
|
||||
srvConfig := &serverConfigV28{}
|
||||
_, err := quick.LoadConfig(configFile, globalEtcdClient, srvConfig)
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("Unable to load config file. %v", err)
|
||||
}
|
||||
|
||||
if srvConfig.Version != "27" {
|
||||
return nil
|
||||
}
|
||||
|
||||
srvConfig.Version = "28"
|
||||
srvConfig.KMS = crypto.KMSConfig{}
|
||||
if err = quick.SaveConfig(srvConfig, configFile, globalEtcdClient); err != nil {
|
||||
return fmt.Errorf("Failed to migrate config from ‘27’ to ‘28’. %v", err)
|
||||
}
|
||||
|
||||
logger.Info(configMigrateMSGTemplate, configFile, "27", "28")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Migrates ${HOME}/.minio/config.json to '<export_path>/.minio.sys/config/config.json'
|
||||
func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
|
||||
// Construct path to config.json for the given bucket.
|
||||
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
||||
|
||||
// Verify if config was already available in .minio.sys in which case, nothing more to be done.
|
||||
if err = checkConfig(context.Background(), objAPI, configFile); err != errConfigNotFound {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// Rename config.json to config.json.deprecated only upon
|
||||
// success of this function.
|
||||
if err == nil {
|
||||
os.Rename(getConfigFile(), getConfigFile()+".deprecated")
|
||||
}
|
||||
}()
|
||||
|
||||
transactionConfigFile := configFile + ".transaction"
|
||||
|
||||
// As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
|
||||
// and configFile, take a transaction lock to avoid data race between readConfig()
|
||||
// and saveConfig().
|
||||
objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile)
|
||||
if err = objLock.GetLock(globalOperationTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
defer objLock.Unlock()
|
||||
|
||||
// Verify if backend already has the file (after holding lock)
|
||||
if err = checkConfig(context.Background(), objAPI, configFile); err != errConfigNotFound {
|
||||
return err
|
||||
} // if errConfigNotFound proceed to migrate..
|
||||
|
||||
var config = &serverConfig{}
|
||||
if _, err = Load(getConfigFile(), config); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
// Read from deprecate file as well if necessary.
|
||||
if _, err = Load(getConfigFile()+".deprecated", config); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
// If all else fails simply initialize the server config.
|
||||
return newSrvConfig(objAPI)
|
||||
}
|
||||
|
||||
}
|
||||
return saveServerConfig(context.Background(), objAPI, config)
|
||||
}

// Migrates '.minio.sys/config.json' to v33.
func migrateMinioSysConfig(objAPI ObjectLayer) error {
	configFile := path.Join(minioConfigPrefix, minioConfigFile)

	// Check if the config version is latest, if not migrate.
	ok, _, err := checkConfigVersion(objAPI, configFile, serverConfigVersion)
	if err != nil {
		return err
	}
	if ok {
		return nil
	}

	// Construct path to the transaction lock file for config.json.
	transactionConfigFile := configFile + ".transaction"

	// As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
	// and configFile, take a transaction lock to avoid data race between readConfig()
	// and saveConfig().
	objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile)
	if err := objLock.GetLock(globalOperationTimeout); err != nil {
		return err
	}
	defer objLock.Unlock()

	if err := migrateV27ToV28MinioSys(objAPI); err != nil {
		return err
	}
	if err := migrateV28ToV29MinioSys(objAPI); err != nil {
		return err
	}
	if err := migrateV29ToV30MinioSys(objAPI); err != nil {
		return err
	}
	if err := migrateV30ToV31MinioSys(objAPI); err != nil {
		return err
	}
	if err := migrateV31ToV32MinioSys(objAPI); err != nil {
		return err
	}
	return migrateV32ToV33MinioSys(objAPI)
}

func checkConfigVersion(objAPI ObjectLayer, configFile string, version string) (bool, []byte, error) {
	data, err := readConfig(context.Background(), objAPI, configFile)
	if err != nil {
		return false, nil, err
	}

	var versionConfig struct {
		Version string `json:"version"`
	}

	vcfg := &versionConfig
	if err = json.Unmarshal(data, vcfg); err != nil {
		return false, nil, err
	}
	return vcfg.Version == version, data, nil
}
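
checkConfigVersion decodes only the version field before any migration commits to a full struct decode, which keeps the probe cheap and tolerant of schema differences between versions. The same idea as a standalone sketch (probeVersion is illustrative and not part of this change; it assumes encoding/json is imported):

// probeVersion reports the "version" field of a config JSON blob
// without decoding the rest of the document.
func probeVersion(data []byte) (string, error) {
	var v struct {
		Version string `json:"version"`
	}
	if err := json.Unmarshal(data, &v); err != nil {
		return "", err
	}
	return v.Version, nil
}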

func migrateV27ToV28MinioSys(objAPI ObjectLayer) error {
	configFile := path.Join(minioConfigPrefix, minioConfigFile)
	ok, data, err := checkConfigVersion(objAPI, configFile, "27")
	if err == errConfigNotFound {
		return nil
	} else if err != nil {
		return fmt.Errorf("Unable to load config file. %v", err)
	}
	if !ok {
		return nil
	}

	cfg := &serverConfigV28{}
	if err = json.Unmarshal(data, cfg); err != nil {
		return err
	}

	cfg.Version = "28"
	cfg.KMS = crypto.KMSConfig{}

	data, err = json.Marshal(cfg)
	if err != nil {
		return err
	}

	if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
		return fmt.Errorf("Failed to migrate config from ‘27’ to ‘28’. %v", err)
	}

	logger.Info(configMigrateMSGTemplate, configFile, "27", "28")
	return nil
}

func migrateV28ToV29MinioSys(objAPI ObjectLayer) error {
	configFile := path.Join(minioConfigPrefix, minioConfigFile)

	ok, data, err := checkConfigVersion(objAPI, configFile, "28")
	if err == errConfigNotFound {
		return nil
	} else if err != nil {
		return fmt.Errorf("Unable to load config file. %v", err)
	}
	if !ok {
		return nil
	}

	cfg := &serverConfigV29{}
	if err = json.Unmarshal(data, cfg); err != nil {
		return err
	}

	cfg.Version = "29"
	data, err = json.Marshal(cfg)
	if err != nil {
		return err
	}

	if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
		return fmt.Errorf("Failed to migrate config from ‘28’ to ‘29’. %v", err)
	}

	logger.Info(configMigrateMSGTemplate, configFile, "28", "29")
	return nil
}

func migrateV29ToV30MinioSys(objAPI ObjectLayer) error {
	configFile := path.Join(minioConfigPrefix, minioConfigFile)

	ok, data, err := checkConfigVersion(objAPI, configFile, "29")
	if err == errConfigNotFound {
		return nil
	} else if err != nil {
		return fmt.Errorf("Unable to load config file. %v", err)
	}
	if !ok {
		return nil
	}

	cfg := &serverConfigV30{}
	if err = json.Unmarshal(data, cfg); err != nil {
		return err
	}

	cfg.Version = "30"
	// Init compression config. For future migrations, compression config needs to be copied over from the previous version.
	cfg.Compression.Enabled = false
	cfg.Compression.Extensions = globalCompressExtensions
	cfg.Compression.MimeTypes = globalCompressMimeTypes

	data, err = json.Marshal(cfg)
	if err != nil {
		return err
	}

	if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
		return fmt.Errorf("Failed to migrate config from ‘29’ to ‘30’. %v", err)
	}

	logger.Info(configMigrateMSGTemplate, configFile, "29", "30")
	return nil
}

func migrateV30ToV31MinioSys(objAPI ObjectLayer) error {
	configFile := path.Join(minioConfigPrefix, minioConfigFile)

	ok, data, err := checkConfigVersion(objAPI, configFile, "30")
	if err == errConfigNotFound {
		return nil
	} else if err != nil {
		return fmt.Errorf("Unable to load config file. %v", err)
	}
	if !ok {
		return nil
	}

	cfg := &serverConfigV31{}
	if err = json.Unmarshal(data, cfg); err != nil {
		return err
	}

	cfg.Version = "31"
	cfg.OpenID.JWKS = validator.JWKSArgs{
		URL: &xnet.URL{},
	}
	cfg.Policy.OPA = iampolicy.OpaArgs{
		URL:       &xnet.URL{},
		AuthToken: "",
	}

	data, err = json.Marshal(cfg)
	if err != nil {
		return err
	}

	if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
		return fmt.Errorf("Failed to migrate config from ‘30’ to ‘31’. %v", err)
	}

	logger.Info(configMigrateMSGTemplate, configFile, "30", "31")
	return nil
}

func migrateV31ToV32MinioSys(objAPI ObjectLayer) error {
	configFile := path.Join(minioConfigPrefix, minioConfigFile)

	ok, data, err := checkConfigVersion(objAPI, configFile, "31")
	if err == errConfigNotFound {
		return nil
	} else if err != nil {
		return fmt.Errorf("Unable to load config file. %v", err)
	}
	if !ok {
		return nil
	}

	cfg := &serverConfigV32{}
	if err = json.Unmarshal(data, cfg); err != nil {
		return err
	}

	cfg.Version = "32"
	cfg.Notify.NSQ = make(map[string]target.NSQArgs)
	cfg.Notify.NSQ["1"] = target.NSQArgs{}

	data, err = json.Marshal(cfg)
	if err != nil {
		return err
	}

	if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
		return fmt.Errorf("Failed to migrate config from ‘31’ to ‘32’. %v", err)
	}

	logger.Info(configMigrateMSGTemplate, configFile, "31", "32")
	return nil
}

func migrateV32ToV33MinioSys(objAPI ObjectLayer) error {
	configFile := path.Join(minioConfigPrefix, minioConfigFile)

	ok, data, err := checkConfigVersion(objAPI, configFile, "32")
	if err == errConfigNotFound {
		return nil
	} else if err != nil {
		return fmt.Errorf("Unable to load config file. %v", err)
	}
	if !ok {
		return nil
	}

	cfg := &serverConfigV33{}
	if err = json.Unmarshal(data, cfg); err != nil {
		return err
	}

	cfg.Version = "33"

	data, err = json.Marshal(cfg)
	if err != nil {
		return err
	}

	if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
		return fmt.Errorf("Failed to migrate config from ‘32’ to ‘33’. %v", err)
	}

	logger.Info(configMigrateMSGTemplate, configFile, "32", "33")
	return nil
}
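
Every step above follows the same read, version-check, mutate, bump, save shape; only the target struct and the version strings change. A hedged generalization of that skeleton (migrateStep and its mutate hook are illustrative, not code from this change):

// migrateStep captures the shared shape of the per-version migrations:
// load the config, verify it is at the source version, apply the
// version-specific mutation, and persist the result.
func migrateStep(objAPI ObjectLayer, from string, mutate func([]byte) ([]byte, error)) error {
	configFile := path.Join(minioConfigPrefix, minioConfigFile)
	ok, data, err := checkConfigVersion(objAPI, configFile, from)
	if err == errConfigNotFound {
		return nil // nothing to migrate
	} else if err != nil {
		return err
	}
	if !ok {
		return nil // config is already past this version
	}
	if data, err = mutate(data); err != nil {
		return err
	}
	return saveConfig(context.Background(), objAPI, configFile, data)
}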

@@ -25,14 +25,25 @@ import (

// Test if config v1 is purged
func TestServerConfigMigrateV1(t *testing.T) {
	rootPath, err := newTestConfig(globalMinioDefaultRegion)
	objLayer, fsDir, err := prepareFS()
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(fsDir)
	err = newTestConfig(globalMinioDefaultRegion, objLayer)
	if err != nil {
		t.Fatalf("Init Test config failed")
	}
	// remove the root directory after the test ends.
	rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(rootPath)
	globalConfigDir = &ConfigDir{path: rootPath}

	setConfigDir(rootPath)
	globalObjLayerMutex.Lock()
	globalObjectAPI = objLayer
	globalObjLayerMutex.Unlock()

	// Create a V1 config json file and store it
	configJSON := "{ \"version\":\"1\", \"accessKeyId\":\"abcde\", \"secretAccessKey\":\"abcdefgh\"}"
@@ -45,13 +56,14 @@ func TestServerConfigMigrateV1(t *testing.T) {
	if err := migrateConfig(); err != nil {
		t.Fatal("Unexpected error: ", err)
	}

	// Check if config v1 is removed from filesystem
	if _, err := os.Stat(configPath); err == nil || !os.IsNotExist(err) {
		t.Fatal("Config V1 file is not purged")
	}

	// Initialize server config and check again if everything is fine
	if err := loadConfig(); err != nil {
	if err := loadConfig(objLayer); err != nil {
		t.Fatalf("Unable to initialize from updated config file %s", err)
	}
}
@@ -59,20 +71,13 @@ func TestServerConfigMigrateV1(t *testing.T) {
// Test if all migrate code returns nil when config file does not
// exist
func TestServerConfigMigrateInexistentConfig(t *testing.T) {
	rootPath, err := newTestConfig(globalMinioDefaultRegion)
	rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
	if err != nil {
		t.Fatalf("Init Test config failed")
		t.Fatal(err)
	}
	// remove the root directory after the test ends.
	defer os.RemoveAll(rootPath)

	setConfigDir(rootPath)
	configPath := rootPath + "/" + minioConfigFile

	// Remove config file
	if err := os.Remove(configPath); err != nil {
		t.Fatal("Unexpected error: ", err)
	}
	globalConfigDir = &ConfigDir{path: rootPath}

	if err := migrateV2ToV3(); err != nil {
		t.Fatal("migrate v2 to v3 should succeed when no config file is found")
@@ -149,18 +154,27 @@ func TestServerConfigMigrateInexistentConfig(t *testing.T) {
	if err := migrateV26ToV27(); err != nil {
		t.Fatal("migrate v26 to v27 should succeed when no config file is found")
	}
	if err := migrateV27ToV28(); err != nil {
		t.Fatal("migrate v27 to v28 should succeed when no config file is found")
	}
}

// Test if a config migration from v2 to v27 is successfully done
func TestServerConfigMigrateV2toV27(t *testing.T) {
	rootPath, err := newTestConfig(globalMinioDefaultRegion)
// Test if a config migration from v2 to v33 is successfully done
func TestServerConfigMigrateV2toV33(t *testing.T) {
	rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
	if err != nil {
		t.Fatalf("Init Test config failed")
		t.Fatal(err)
	}
	// remove the root directory after the test ends.
	defer os.RemoveAll(rootPath)

	setConfigDir(rootPath)
	globalConfigDir = &ConfigDir{path: rootPath}

	objLayer, fsDir, err := prepareFS()
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(fsDir)

	configPath := rootPath + "/" + minioConfigFile

	// Create a corrupted config file
@@ -186,8 +200,16 @@ func TestServerConfigMigrateV2toV27(t *testing.T) {
		t.Fatal("Unexpected error: ", err)
	}

	if err := migrateConfigToMinioSys(objLayer); err != nil {
		t.Fatal("Unexpected error: ", err)
	}

	if err := migrateMinioSysConfig(objLayer); err != nil {
		t.Fatal("Unexpected error: ", err)
	}

	// Initialize server config and check again if everything is fine
	if err := loadConfig(); err != nil {
	if err := loadConfig(objLayer); err != nil {
		t.Fatalf("Unable to initialize from updated config file %s", err)
	}

@@ -201,6 +223,7 @@ func TestServerConfigMigrateV2toV27(t *testing.T) {
	if globalServerConfig.Credential.AccessKey != accessKey {
		t.Fatalf("Access key lost during migration, expected: %v, found:%v", accessKey, globalServerConfig.Credential.AccessKey)
	}

	if globalServerConfig.Credential.SecretKey != secretKey {
		t.Fatalf("Secret key lost during migration, expected: %v, found: %v", secretKey, globalServerConfig.Credential.SecretKey)
	}
@@ -208,14 +231,13 @@ func TestServerConfigMigrateV2toV27(t *testing.T) {

// Test if all migrate code returns error with corrupted config files
func TestServerConfigMigrateFaultyConfig(t *testing.T) {
	rootPath, err := newTestConfig(globalMinioDefaultRegion)
	rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
	if err != nil {
		t.Fatalf("Init Test config failed")
		t.Fatal(err)
	}
	// remove the root directory after the test ends.
	defer os.RemoveAll(rootPath)

	setConfigDir(rootPath)
	globalConfigDir = &ConfigDir{path: rootPath}
	configPath := rootPath + "/" + minioConfigFile

	// Create a corrupted config file
@@ -299,18 +321,20 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
	if err := migrateV26ToV27(); err == nil {
		t.Fatal("migrateConfigV26ToV27() should fail with a corrupted json")
	}
	if err := migrateV27ToV28(); err == nil {
		t.Fatal("migrateConfigV27ToV28() should fail with a corrupted json")
	}
}

// Test if all migrate code returns error with corrupted config files
func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
	rootPath, err := newTestConfig(globalMinioDefaultRegion)
	rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
	if err != nil {
		t.Fatalf("Init Test config failed")
		t.Fatal(err)
	}
	// remove the root directory after the test ends.
	defer os.RemoveAll(rootPath)

	setConfigDir(rootPath)
	globalConfigDir = &ConfigDir{path: rootPath}
	configPath := rootPath + "/" + minioConfigFile

	for i := 3; i <= 17; i++ {

@@ -19,8 +19,11 @@ package cmd
import (
	"sync"

	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/event/target"
	"github.com/minio/minio/pkg/iam/policy"
	"github.com/minio/minio/pkg/iam/validator"
	"github.com/minio/minio/pkg/quick"
)

@@ -260,9 +263,6 @@ type serverConfigV7 struct {

	// Notification queue configuration.
	Notify notifierV1 `json:"notify"`

	// Read Write mutex.
	rwMutex *sync.RWMutex
}

// serverConfigV8 server configuration version '8'. Adds NATS notifier
@@ -279,9 +279,6 @@ type serverConfigV8 struct {

	// Notification queue configuration.
	Notify notifierV1 `json:"notify"`

	// Read Write mutex.
	rwMutex *sync.RWMutex
}

// serverConfigV9 server configuration version '9'. Adds PostgreSQL
@@ -298,9 +295,6 @@ type serverConfigV9 struct {

	// Notification queue configuration.
	Notify notifierV1 `json:"notify"`

	// Read Write mutex.
	rwMutex *sync.RWMutex
}

type loggerV7 struct {
@@ -370,7 +364,7 @@ type serverConfigV12 struct {
	Notify notifierV2 `json:"notify"`
}

type notifier struct {
type notifierV3 struct {
	AMQP map[string]target.AMQPArgs `json:"amqp"`
	Elasticsearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
	Kafka map[string]target.KafkaArgs `json:"kafka"`
@@ -395,7 +389,7 @@ type serverConfigV13 struct {
	Logger *loggerV7 `json:"logger"`

	// Notification queue configuration.
	Notify *notifier `json:"notify"`
	Notify *notifierV3 `json:"notify"`
}

// serverConfigV14 server configuration version '14' which is like
@@ -412,7 +406,7 @@ type serverConfigV14 struct {
	Logger *loggerV7 `json:"logger"`

	// Notification queue configuration.
	Notify *notifier `json:"notify"`
	Notify *notifierV3 `json:"notify"`
}

// serverConfigV15 server configuration version '15' which is like
@@ -429,7 +423,7 @@ type serverConfigV15 struct {
	Logger *loggerV7 `json:"logger"`

	// Notification queue configuration.
	Notify *notifier `json:"notify"`
	Notify *notifierV3 `json:"notify"`
}

// FileLogger is introduced to work around the dependency on logrus
@@ -467,7 +461,7 @@ type serverConfigV16 struct {
	Logger *loggers `json:"logger"`

	// Notification queue configuration.
	Notify *notifier `json:"notify"`
	Notify *notifierV3 `json:"notify"`
}

// serverConfigV17 server configuration version '17' which is like
@@ -486,7 +480,7 @@ type serverConfigV17 struct {
	Logger *loggers `json:"logger"`

	// Notification queue configuration.
	Notify *notifier `json:"notify"`
	Notify *notifierV3 `json:"notify"`
}

// serverConfigV18 server configuration version '18' which is like
@@ -505,7 +499,7 @@ type serverConfigV18 struct {
	Logger *loggers `json:"logger"`

	// Notification queue configuration.
	Notify *notifier `json:"notify"`
	Notify *notifierV3 `json:"notify"`
}

// serverConfigV19 server configuration version '19' which is like
@@ -523,7 +517,7 @@ type serverConfigV19 struct {
	Logger *loggers `json:"logger"`

	// Notification queue configuration.
	Notify *notifier `json:"notify"`
	Notify *notifierV3 `json:"notify"`
}

// serverConfigV20 server configuration version '20' which is like
@@ -542,7 +536,7 @@ type serverConfigV20 struct {
	Logger *loggers `json:"logger"`

	// Notification queue configuration.
	Notify *notifier `json:"notify"`
	Notify *notifierV3 `json:"notify"`
}

// serverConfigV21 is just like version '20' without logger field
@@ -557,7 +551,7 @@ type serverConfigV21 struct {
	Domain string `json:"domain"`

	// Notification queue configuration.
	Notify *notifier `json:"notify"`
	Notify *notifierV3 `json:"notify"`
}

// serverConfigV22 is just like version '21' with added support
@@ -578,7 +572,7 @@ type serverConfigV22 struct {
	StorageClass storageClassConfig `json:"storageclass"`

	// Notification queue configuration.
	Notify notifier `json:"notify"`
	Notify notifierV3 `json:"notify"`
}

// serverConfigV23 is just like version '22' with addition of cache field.
@@ -601,7 +595,7 @@ type serverConfigV23 struct {
	Cache CacheConfig `json:"cache"`

	// Notification queue configuration.
	Notify notifier `json:"notify"`
	Notify notifierV3 `json:"notify"`
}

// serverConfigV24 is just like version '23', we had to revert
@@ -625,7 +619,7 @@ type serverConfigV24 struct {
	Cache CacheConfig `json:"cache"`

	// Notification queue configuration.
	Notify notifier `json:"notify"`
	Notify notifierV3 `json:"notify"`
}

// serverConfigV25 is just like version '24', stores additionally
@@ -652,7 +646,7 @@ type serverConfigV25 struct {
	Cache CacheConfig `json:"cache"`

	// Notification queue configuration.
	Notify notifier `json:"notify"`
	Notify notifierV3 `json:"notify"`
}

// serverConfigV26 is just like version '25', stores additionally
@@ -676,7 +670,7 @@ type serverConfigV26 struct {
	Cache CacheConfig `json:"cache"`

	// Notification queue configuration.
	Notify notifier `json:"notify"`
	Notify notifierV3 `json:"notify"`
}

type loggerConsole struct {
@@ -717,8 +711,219 @@ type serverConfigV27 struct {
	Cache CacheConfig `json:"cache"`

	// Notification queue configuration.
	Notify notifier `json:"notify"`
	Notify notifierV3 `json:"notify"`

	// Logger configuration
	Logger loggerConfig `json:"logger"`
}

// serverConfigV28 is just like version '27', additionally
// storing KMS config
//
// IMPORTANT NOTE: When updating this struct make sure that
// serverConfig.ConfigDiff() is updated as necessary.
type serverConfigV28 struct {
	quick.Config `json:"-"` // ignore interfaces

	Version string `json:"version"`

	// S3 API configuration.
	Credential auth.Credentials `json:"credential"`
	Region string `json:"region"`
	Worm BoolFlag `json:"worm"`

	// Storage class configuration
	StorageClass storageClassConfig `json:"storageclass"`

	// Cache configuration
	Cache CacheConfig `json:"cache"`

	// KMS configuration
	KMS crypto.KMSConfig `json:"kms"`

	// Notification queue configuration.
	Notify notifierV3 `json:"notify"`

	// Logger configuration
	Logger loggerConfig `json:"logger"`
}

// serverConfigV29 is just like version '28'.
type serverConfigV29 serverConfigV28

// compressionConfig represents the compression settings.
type compressionConfig struct {
	Enabled bool `json:"enabled"`
	Extensions []string `json:"extensions"`
	MimeTypes []string `json:"mime-types"`
}
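
Rendered into config.json, compressionConfig becomes the compress section that migrateV29ToV30MinioSys initializes. An illustrative marshal showing the resulting JSON shape (a sketch, assuming encoding/json and fmt are imported; the extension and MIME values stand in for globalCompressExtensions and globalCompressMimeTypes and are hypothetical):

// Illustration only: marshal a compressionConfig to see the JSON
// written under the "compress" key of config.json.
cfg := compressionConfig{
	Enabled:    false,
	Extensions: []string{".txt", ".log", ".csv"}, // hypothetical defaults
	MimeTypes:  []string{"text/csv"},             // hypothetical defaults
}
out, _ := json.MarshalIndent(cfg, "", "  ")
fmt.Println(string(out)) // {"enabled": false, "extensions": [...], "mime-types": [...]}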

// serverConfigV30 is just like version '29', stores additionally
// extensions and mimetypes fields for compression.
type serverConfigV30 struct {
	Version string `json:"version"`

	// S3 API configuration.
	Credential auth.Credentials `json:"credential"`
	Region string `json:"region"`
	Worm BoolFlag `json:"worm"`

	// Storage class configuration
	StorageClass storageClassConfig `json:"storageclass"`

	// Cache configuration
	Cache CacheConfig `json:"cache"`

	// KMS configuration
	KMS crypto.KMSConfig `json:"kms"`

	// Notification queue configuration.
	Notify notifierV3 `json:"notify"`

	// Logger configuration
	Logger loggerConfig `json:"logger"`

	// Compression configuration
	Compression compressionConfig `json:"compress"`
}

// serverConfigV31 is just like version '30', with OPA and OpenID configuration.
type serverConfigV31 struct {
	Version string `json:"version"`

	// S3 API configuration.
	Credential auth.Credentials `json:"credential"`
	Region string `json:"region"`
	Worm BoolFlag `json:"worm"`

	// Storage class configuration
	StorageClass storageClassConfig `json:"storageclass"`

	// Cache configuration
	Cache CacheConfig `json:"cache"`

	// KMS configuration
	KMS crypto.KMSConfig `json:"kms"`

	// Notification queue configuration.
	Notify notifierV3 `json:"notify"`

	// Logger configuration
	Logger loggerConfig `json:"logger"`

	// Compression configuration
	Compression compressionConfig `json:"compress"`

	// OpenID configuration
	OpenID struct {
		// JWKS validator config.
		JWKS validator.JWKSArgs `json:"jwks"`
	} `json:"openid"`

	// External policy enforcements.
	Policy struct {
		// OPA configuration.
		OPA iampolicy.OpaArgs `json:"opa"`

		// Add new external policy enforcements here.
	} `json:"policy"`
}

type notifier struct {
	AMQP map[string]target.AMQPArgs `json:"amqp"`
	Elasticsearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
	Kafka map[string]target.KafkaArgs `json:"kafka"`
	MQTT map[string]target.MQTTArgs `json:"mqtt"`
	MySQL map[string]target.MySQLArgs `json:"mysql"`
	NATS map[string]target.NATSArgs `json:"nats"`
	NSQ map[string]target.NSQArgs `json:"nsq"`
	PostgreSQL map[string]target.PostgreSQLArgs `json:"postgresql"`
	Redis map[string]target.RedisArgs `json:"redis"`
	Webhook map[string]target.WebhookArgs `json:"webhook"`
}

// serverConfigV32 is just like version '31' with added NSQ notifier.
type serverConfigV32 struct {
	Version string `json:"version"`

	// S3 API configuration.
	Credential auth.Credentials `json:"credential"`
	Region string `json:"region"`
	Worm BoolFlag `json:"worm"`

	// Storage class configuration
	StorageClass storageClassConfig `json:"storageclass"`

	// Cache configuration
	Cache CacheConfig `json:"cache"`

	// KMS configuration
	KMS crypto.KMSConfig `json:"kms"`

	// Notification queue configuration.
	Notify notifier `json:"notify"`

	// Logger configuration
	Logger loggerConfig `json:"logger"`

	// Compression configuration
	Compression compressionConfig `json:"compress"`

	// OpenID configuration
	OpenID struct {
		// JWKS validator config.
		JWKS validator.JWKSArgs `json:"jwks"`
	} `json:"openid"`

	// External policy enforcements.
	Policy struct {
		// OPA configuration.
		OPA iampolicy.OpaArgs `json:"opa"`

		// Add new external policy enforcements here.
	} `json:"policy"`
}

// serverConfigV33 is just like version '32', removes clientID from NATS and MQTT, and adds queueDir with MQTT.
type serverConfigV33 struct {
	quick.Config `json:"-"` // ignore interfaces

	Version string `json:"version"`

	// S3 API configuration.
	Credential auth.Credentials `json:"credential"`
	Region string `json:"region"`
	Worm BoolFlag `json:"worm"`

	// Storage class configuration
	StorageClass storageClassConfig `json:"storageclass"`

	// Cache configuration
	Cache CacheConfig `json:"cache"`

	// KMS configuration
	KMS crypto.KMSConfig `json:"kms"`

	// Notification queue configuration.
	Notify notifier `json:"notify"`

	// Logger configuration
	Logger loggerConfig `json:"logger"`

	// Compression configuration
	Compression compressionConfig `json:"compress"`

	// OpenID configuration
	OpenID struct {
		// JWKS validator config.
		JWKS validator.JWKSArgs `json:"jwks"`
	} `json:"openid"`

	// External policy enforcements.
	Policy struct {
		// OPA configuration.
		OPA iampolicy.OpaArgs `json:"opa"`

		// Add new external policy enforcements here.
	} `json:"policy"`
}

196 cmd/config.go Normal file
@@ -0,0 +1,196 @@
/*
 * Minio Cloud Storage, (C) 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"context"
	"encoding/json"
	"path"
	"runtime"
	"strings"

	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/quick"
)

const (
	minioConfigPrefix = "config"

	// Minio configuration file.
	minioConfigFile = "config.json"

	// Minio backup file
	minioConfigBackupFile = minioConfigFile + ".backup"
)

func saveServerConfig(ctx context.Context, objAPI ObjectLayer, config *serverConfig) error {
	if err := quick.CheckData(config); err != nil {
		return err
	}

	data, err := json.MarshalIndent(config, "", "\t")
	if err != nil {
		return err
	}

	configFile := path.Join(minioConfigPrefix, minioConfigFile)
	if globalEtcdClient != nil {
		return saveConfigEtcd(ctx, globalEtcdClient, configFile, data)
	}

	// Create a backup of the current config
	oldData, err := readConfig(ctx, objAPI, configFile)
	if err == nil {
		backupConfigFile := path.Join(minioConfigPrefix, minioConfigBackupFile)
		if err = saveConfig(ctx, objAPI, backupConfigFile, oldData); err != nil {
			return err
		}
	} else {
		if err != errConfigNotFound {
			return err
		}
	}

	// Save the new config in the std config path
	return saveConfig(ctx, objAPI, configFile, data)
}

func readServerConfig(ctx context.Context, objAPI ObjectLayer) (*serverConfig, error) {
	var configData []byte
	var err error

	configFile := path.Join(minioConfigPrefix, minioConfigFile)
	if globalEtcdClient != nil {
		configData, err = readConfigEtcd(ctx, globalEtcdClient, configFile)
	} else {
		configData, err = readConfig(ctx, objAPI, configFile)
	}
	if err != nil {
		return nil, err
	}

	if runtime.GOOS == "windows" {
		configData = bytes.Replace(configData, []byte("\r\n"), []byte("\n"), -1)
	}

	if err = quick.CheckDuplicateKeys(string(configData)); err != nil {
		return nil, err
	}

	var config = &serverConfig{}
	if err = json.Unmarshal(configData, config); err != nil {
		return nil, err
	}

	if err = quick.CheckData(config); err != nil {
		return nil, err
	}

	return config, nil
}
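
Together the two functions give the config a safe round trip through the object layer: saveServerConfig snapshots the previous file to config.json.backup before overwriting, and readServerConfig re-validates whatever comes back. A usage sketch (error handling compressed for brevity):

// Sketch: persist the in-memory config, then read it back.
if err := saveServerConfig(context.Background(), objAPI, globalServerConfig); err != nil {
	return err
}
cfg, err := readServerConfig(context.Background(), objAPI)
if err != nil {
	return err
}
_ = cfg // after a successful migration, cfg.Version == serverConfigVersion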

// ConfigSys - config system.
type ConfigSys struct{}

// Load - load config.json.
func (sys *ConfigSys) Load(objAPI ObjectLayer) error {
	return sys.Init(objAPI)
}

// Init - initializes config system from config.json.
func (sys *ConfigSys) Init(objAPI ObjectLayer) error {
	if objAPI == nil {
		return errInvalidArgument
	}

	doneCh := make(chan struct{})
	defer close(doneCh)

	// Initializing configuration needs a retry mechanism for
	// the following reasons:
	//  - Read quorum is lost just after the initialization
	//    of the object layer.
	//  - Write quorum not met when upgrading configuration
	//    version is needed.
	for range newRetryTimerSimple(doneCh) {
		if err := initConfig(objAPI); err != nil {
			if strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
				strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
				logger.Info("Waiting for configuration to be initialized..")
				continue
			}
			return err
		}
		break
	}
	return nil
}

// NewConfigSys - creates new config system object.
func NewConfigSys() *ConfigSys {
	return &ConfigSys{}
}

// Initialize and load config from remote etcd or local config directory
func initConfig(objAPI ObjectLayer) error {
	if objAPI == nil {
		return errServerNotInitialized
	}

	configFile := path.Join(minioConfigPrefix, minioConfigFile)

	if globalEtcdClient != nil {
		if err := checkConfigEtcd(context.Background(), globalEtcdClient, getConfigFile()); err != nil {
			if err == errConfigNotFound {
				// Migrates all configs at old location.
				if err = migrateConfig(); err != nil {
					return err
				}
				// Migrates etcd ${HOME}/.minio/config.json to '/config/config.json'
				if err = migrateConfigToMinioSys(objAPI); err != nil {
					return err
				}
			} else {
				return err
			}
		}

		// Watch config for changes and reload it.
		go watchConfigEtcd(objAPI, configFile, loadConfig)

	} else {
		if isFile(getConfigFile()) {
			if err := migrateConfig(); err != nil {
				return err
			}
		}
		// Migrates ${HOME}/.minio/config.json or config.json.deprecated
		// to '<export_path>/.minio.sys/config/config.json'
		// ignore if the file doesn't exist.
		if err := migrateConfigToMinioSys(objAPI); err != nil {
			return err
		}

		// Migrates backend '<export_path>/.minio.sys/config/config.json' to latest version.
		if err := migrateMinioSysConfig(objAPI); err != nil {
			return err
		}
	}

	return loadConfig(objAPI)
}
@@ -17,90 +17,53 @@
package cmd

import (
	"fmt"
	"context"
	"net/http"
	"net/url"
	"strconv"
	"strings"
)

// Writes S3 compatible copy part range error.
func writeCopyPartErr(w http.ResponseWriter, err error, url *url.URL) {
func writeCopyPartErr(ctx context.Context, w http.ResponseWriter, err error, url *url.URL, browser bool) {
	switch err {
	case errInvalidRange:
		writeErrorResponse(w, ErrInvalidCopyPartRange, url)
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopyPartRange), url, browser)
		return
	case errInvalidRangeSource:
		writeErrorResponse(w, ErrInvalidCopyPartRangeSource, url)
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopyPartRangeSource), url, browser)
		return
	default:
		writeErrorResponse(w, ErrInternalError, url)
		writeErrorResponse(ctx, w, toAPIError(ctx, err), url, browser)
		return
	}
}

// Parses x-amz-copy-source-range for CopyObjectPart API. Specifically written to
// differentiate the behavior between regular httpRange header v/s x-amz-copy-source-range.
// The range of bytes to copy from the source object. The range value must use the form
// bytes=first-last, where the first and last are the zero-based byte offsets to copy.
// For example, bytes=0-9 indicates that you want to copy the first ten bytes of the source.
// Parses x-amz-copy-source-range for CopyObjectPart API. Its behavior
// is different from regular HTTP range header. It only supports the
// form `bytes=first-last` where first and last are zero-based byte
// offsets. See
// http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
func parseCopyPartRange(rangeString string, resourceSize int64) (hrange *httpRange, err error) {
	// Return error if given range string doesn't start with byte range prefix.
	if !strings.HasPrefix(rangeString, byteRangePrefix) {
		return nil, fmt.Errorf("'%s' does not start with '%s'", rangeString, byteRangePrefix)
// for full details. This function treats an empty rangeString as
// referring to the whole resource.
func parseCopyPartRangeSpec(rangeString string) (hrange *HTTPRangeSpec, err error) {
	hrange, err = parseRequestRangeSpec(rangeString)
	if err != nil {
		return nil, err
	}

	// Trim byte range prefix.
	byteRangeString := strings.TrimPrefix(rangeString, byteRangePrefix)

	// Check if range string contains delimiter '-', else return error. eg. "bytes=8"
	sepIndex := strings.Index(byteRangeString, "-")
	if sepIndex == -1 {
	if hrange.IsSuffixLength || hrange.Start < 0 || hrange.End < 0 {
		return nil, errInvalidRange
	}

	offsetBeginString := byteRangeString[:sepIndex]
	offsetBegin := int64(-1)
	// Convert offsetBeginString only if it's not empty.
	if len(offsetBeginString) > 0 {
		if !validBytePos.MatchString(offsetBeginString) {
			return nil, errInvalidRange
		}
		if offsetBegin, err = strconv.ParseInt(offsetBeginString, 10, 64); err != nil {
			return nil, errInvalidRange
		}
	}

	offsetEndString := byteRangeString[sepIndex+1:]
	offsetEnd := int64(-1)
	// Convert offsetEndString only if it's not empty.
	if len(offsetEndString) > 0 {
		if !validBytePos.MatchString(offsetEndString) {
			return nil, errInvalidRange
		}
		if offsetEnd, err = strconv.ParseInt(offsetEndString, 10, 64); err != nil {
			return nil, errInvalidRange
		}
	}

	// rangeString contains first byte positions. eg. "bytes=2-" or
	// rangeString contains last byte positions. eg. "bytes=-2"
	if offsetBegin == -1 || offsetEnd == -1 {
		return nil, errInvalidRange
	}

	// First byte position should not be greater than last byte
	// position. eg. "bytes=5-2"
	if offsetBegin > offsetEnd {
		return nil, errInvalidRange
	}

	// First and last byte positions should not be >= resourceSize.
	if offsetBegin >= resourceSize || offsetEnd >= resourceSize {
		return nil, errInvalidRangeSource
	}

	// Success..
	return &httpRange{offsetBegin, offsetEnd, resourceSize}, nil
	return hrange, nil
}

// checkCopyPartRangeWithSize adds more checks to the range string in case of
// copy object part. This API requires having specific start and end range values
// e.g. 'bytes=3-10'. Other use cases will be rejected.
func checkCopyPartRangeWithSize(rs *HTTPRangeSpec, resourceSize int64) (err error) {
	if rs == nil {
		return nil
	}
	if rs.IsSuffixLength || rs.Start >= resourceSize || rs.End >= resourceSize {
		return errInvalidRangeSource
	}
	return nil
}
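
Parsing and size validation are now split: parseCopyPartRangeSpec rejects malformed strings, while checkCopyPartRangeWithSize rejects well-formed ranges that fall outside the source object. A usage sketch of the pair:

// Sketch: validate an x-amz-copy-source-range header against a source size.
rs, err := parseCopyPartRangeSpec("bytes=2-5")
if err != nil {
	// malformed spec, e.g. "bytes=2-", "bytes=-5" or "2-5"
}
if err := checkCopyPartRangeWithSize(rs, 10); err != nil {
	// errInvalidRangeSource: the range does not fit a 10-byte source
}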

@@ -19,35 +19,37 @@ package cmd
import "testing"

// Test parseCopyPartRange()
func TestParseCopyPartRange(t *testing.T) {
func TestParseCopyPartRangeSpec(t *testing.T) {
	// Test success cases.
	successCases := []struct {
		rangeString string
		offsetBegin int64
		offsetEnd int64
		length int64
	}{
		{"bytes=2-5", 2, 5, 4},
		{"bytes=2-9", 2, 9, 8},
		{"bytes=2-2", 2, 2, 1},
		{"bytes=0000-0006", 0, 6, 7},
		{"bytes=2-5", 2, 5},
		{"bytes=2-9", 2, 9},
		{"bytes=2-2", 2, 2},
		{"bytes=0000-0006", 0, 6},
	}
	objectSize := int64(10)

	for _, successCase := range successCases {
		hrange, err := parseCopyPartRange(successCase.rangeString, 10)
		rs, err := parseCopyPartRangeSpec(successCase.rangeString)
		if err != nil {
			t.Fatalf("expected: <nil>, got: %s", err)
		}

		if hrange.offsetBegin != successCase.offsetBegin {
			t.Fatalf("expected: %d, got: %d", successCase.offsetBegin, hrange.offsetBegin)
		start, length, err1 := rs.GetOffsetLength(objectSize)
		if err1 != nil {
			t.Fatalf("expected: <nil>, got: %s", err1)
		}

		if hrange.offsetEnd != successCase.offsetEnd {
			t.Fatalf("expected: %d, got: %d", successCase.offsetEnd, hrange.offsetEnd)
		if start != successCase.offsetBegin {
			t.Fatalf("expected: %d, got: %d", successCase.offsetBegin, start)
		}
		if hrange.getLength() != successCase.length {
			t.Fatalf("expected: %d, got: %d", successCase.length, hrange.getLength())

		if start+length-1 != successCase.offsetEnd {
			t.Fatalf("expected: %d, got: %d", successCase.offsetEnd, start+length-1)
		}
	}

@@ -59,15 +61,16 @@ func TestParseCopyPartRange(t *testing.T) {
		"bytes=2-+5",
		"bytes=2--5",
		"bytes=-",
		"",
		"2-5",
		"bytes = 2-5",
		"bytes=2 - 5",
		"bytes=0-0,-1",
		"bytes=2-5 ",
		"bytes=-1",
		"bytes=1-",
	}
	for _, rangeString := range invalidRangeStrings {
		if _, err := parseCopyPartRange(rangeString, 10); err == nil {
		if _, err := parseCopyPartRangeSpec(rangeString); err == nil {
			t.Fatalf("expected: an error, got: <nil> for range %s", rangeString)
		}
	}

@@ -78,8 +81,14 @@ func TestParseCopyPartRange(t *testing.T) {
		"bytes=20-30",
	}
	for _, rangeString := range errorRangeString {
		if _, err := parseCopyPartRange(rangeString, 10); err != errInvalidRangeSource {
			t.Fatalf("expected: %s, got: %s", errInvalidRangeSource, err)
		rs, err := parseCopyPartRangeSpec(rangeString)
		if err == nil {
			err1 := checkCopyPartRangeWithSize(rs, objectSize)
			if err1 != errInvalidRangeSource {
				t.Fatalf("expected: %s, got: %s", errInvalidRangeSource, err)
			}
		} else {
			t.Fatalf("expected: %s, got: <nil>", errInvalidRangeSource)
		}
	}
}

21 cmd/crypto/config.go Normal file
@@ -0,0 +1,21 @@
// Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package crypto

// KMSConfig has the KMS config for Hashicorp Vault
type KMSConfig struct {
	AutoEncryption bool `json:"-"`
	Vault VaultConfig `json:"vault"`
}
@@ -43,10 +43,17 @@ var (
	// base64-encoded string or not 256 bits long.
	ErrInvalidCustomerKey = errors.New("The SSE-C client key is invalid")

	// ErrSecretKeyMismatch indicates that the provided secret key (SSE-C client key / SSE-S3 KMS key)
	// does not match the secret key used while encrypting the object.
	ErrSecretKeyMismatch = errors.New("The secret key does not match the secret key used during upload")

	// ErrCustomerKeyMD5Mismatch indicates that the SSE-C key MD5 does not match the
	// computed MD5 sum. This means that the client provided either the wrong key for
	// a certain MD5 checksum or the wrong MD5 for a certain key.
	ErrCustomerKeyMD5Mismatch = errors.New("The provided SSE-C key MD5 does not match the computed MD5 of the SSE-C key")
	// ErrIncompatibleEncryptionMethod indicates that both SSE-C headers and SSE-S3 headers were specified, and are incompatible.
	// The client needs to remove the SSE-S3 header or the SSE-C headers.
	ErrIncompatibleEncryptionMethod = errors.New("Server side encryption specified with both SSE-C and SSE-S3 headers")
)

var (

@@ -19,11 +19,22 @@ import (
	"crypto/md5"
	"encoding/base64"
	"net/http"
	"strings"
)

// SSEHeader is the general AWS SSE HTTP header key.
const SSEHeader = "X-Amz-Server-Side-Encryption"

const (
	// SSEKmsID is the HTTP header key referencing the SSE-KMS
	// key ID.
	SSEKmsID = SSEHeader + "-Aws-Kms-Key-Id"

	// SSEKmsContext is the HTTP header key referencing the
	// SSE-KMS encryption context.
	SSEKmsContext = SSEHeader + "-Context"
)

const (
	// SSECAlgorithm is the HTTP header key referencing
	// the SSE-C algorithm.
@@ -52,10 +63,24 @@ const (
	SSECopyKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5"
)

// SSEAlgorithmAES256 is the only supported value for the SSE-S3 or SSE-C algorithm header.
// For SSE-S3 see: https://docs.aws.amazon.com/AmazonS3/latest/dev/SSEUsingRESTAPI.html
// For SSE-C see: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
const SSEAlgorithmAES256 = "AES256"
const (
	// SSEAlgorithmAES256 is the only supported value for the SSE-S3 or SSE-C algorithm header.
	// For SSE-S3 see: https://docs.aws.amazon.com/AmazonS3/latest/dev/SSEUsingRESTAPI.html
	// For SSE-C see: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
	SSEAlgorithmAES256 = "AES256"

	// SSEAlgorithmKMS is the value of 'X-Amz-Server-Side-Encryption' for SSE-KMS.
	// See: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html
	SSEAlgorithmKMS = "aws:kms"
)

// RemoveSensitiveHeaders removes confidential encryption
// information - e.g. the SSE-C key - from the HTTP headers.
// It has the same semantics as RemoveSensitiveEntries.
func RemoveSensitiveHeaders(h http.Header) {
	h.Del(SSECKey)
	h.Del(SSECopyKey)
}
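
Because the two Del calls drop only the key material and keep the algorithm and MD5 headers, a request can still be audited without exposing secrets. A sketch of the intended use before logging (r is any *http.Request; the log call is illustrative):

// Sketch: scrub SSE-C key material before recording request headers.
crypto.RemoveSensitiveHeaders(r.Header) // drops SSECKey and SSECopyKey
log.Printf("headers: %v", r.Header)     // safe: no client keys can leak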

// S3 represents AWS SSE-S3. It provides functionality to handle
// SSE-S3 requests.
@@ -67,7 +92,7 @@ type s3 struct{}
// the S3 client requests SSE-S3.
func (s3) IsRequested(h http.Header) bool {
	_, ok := h[SSEHeader]
	return ok
	return ok && strings.ToLower(h.Get(SSEHeader)) != SSEAlgorithmKMS // Return true only if the SSE header is specified and does not contain the SSE-KMS value
}

// ParseHTTP parses the SSE-S3 related HTTP headers and checks
@@ -79,6 +104,27 @@ func (s3) ParseHTTP(h http.Header) (err error) {
	return
}

// S3KMS represents AWS SSE-KMS. It provides functionality to
// handle SSE-KMS requests.
var S3KMS = s3KMS{}

type s3KMS struct{}

// IsRequested returns true if the HTTP headers indicate that
// the S3 client requests SSE-KMS.
func (s3KMS) IsRequested(h http.Header) bool {
	if _, ok := h[SSEKmsID]; ok {
		return true
	}
	if _, ok := h[SSEKmsContext]; ok {
		return true
	}
	if _, ok := h[SSEHeader]; ok {
		return strings.ToUpper(h.Get(SSEHeader)) != SSEAlgorithmAES256 // Return true only if the SSE header is specified and does not contain the SSE-S3 value
	}
	return false
}
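
The two predicates split requests that carry the generic SSE header: AES256 selects SSE-S3, while aws:kms or any KMS-specific header selects SSE-KMS, and ambiguous values fall to the KMS side. A sketch of how a handler might dispatch on them (it assumes the package also exports the SSE-S3 scheme as crypto.S3, matching the s3 type above):

// Sketch: choose an encryption scheme from an incoming request's headers.
switch {
case crypto.S3KMS.IsRequested(r.Header):
	// SSE-KMS: aws:kms, or a KMS key-id/context header is present
case crypto.S3.IsRequested(r.Header):
	// SSE-S3: X-Amz-Server-Side-Encryption set and not aws:kms
}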

var (
	// SSEC represents AWS SSE-C. It provides functionality to handle
	// SSE-C requests.
@@ -126,7 +172,6 @@ func (ssecCopy) IsRequested(h http.Header) bool {
// ParseHTTP parses the SSE-C headers and returns the SSE-C client key
// on success. SSE-C copy headers are ignored.
func (ssec) ParseHTTP(h http.Header) (key [32]byte, err error) {
	defer h.Del(SSECKey) // remove SSE-C key from headers after parsing
	if h.Get(SSECAlgorithm) != SSEAlgorithmAES256 {
		return key, ErrInvalidCustomerAlgorithm
	}
@@ -152,7 +197,6 @@ func (ssec) ParseHTTP(h http.Header) (key [32]byte, err error) {
// ParseHTTP parses the SSE-C copy headers and returns the SSE-C client key
// on success. Regular SSE-C headers are ignored.
func (ssecCopy) ParseHTTP(h http.Header) (key [32]byte, err error) {
	defer h.Del(SSECopyKey) // remove SSE-C copy key of source object from headers after parsing
	if h.Get(SSECopyAlgorithm) != SSEAlgorithmAES256 {
		return key, ErrInvalidCustomerAlgorithm
	}

@@ -16,17 +16,53 @@ package crypto

import (
	"net/http"
	"sort"
	"testing"
)

var kmsIsRequestedTests = []struct {
	Header http.Header
	Expected bool
}{
	{Header: http.Header{}, Expected: false}, // 0
	{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"aws:kms"}}, Expected: true}, // 1
	{Header: http.Header{"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{"0839-9047947-844842874-481"}}, Expected: true}, // 2
	{Header: http.Header{"X-Amz-Server-Side-Encryption-Context": []string{"7PpPLAK26ONlVUGOWlusfg=="}}, Expected: true}, // 3
	{
		Header: http.Header{
			"X-Amz-Server-Side-Encryption": []string{""},
			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{""},
			"X-Amz-Server-Side-Encryption-Context": []string{""},
		},
		Expected: true,
	}, // 4
	{
		Header: http.Header{
			"X-Amz-Server-Side-Encryption": []string{"AES256"},
			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{""},
		},
		Expected: true,
	}, // 5
	{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES256"}}, Expected: false}, // 6
}

func TestKMSIsRequested(t *testing.T) {
	for i, test := range kmsIsRequestedTests {
		if got := S3KMS.IsRequested(test.Header); got != test.Expected {
			t.Errorf("Test %d: Wanted %v but got %v", i, test.Expected, got)
		}
	}
}

var s3IsRequestedTests = []struct {
	Header http.Header
	Expected bool
}{
	{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES256"}}, Expected: true}, // 0
	{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES-256"}}, Expected: true}, // 1
	{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{""}}, Expected: true}, // 2
	{Header: http.Header{"X-Amz-Server-Side-Encryptio": []string{"AES256"}}, Expected: false}, // 3
	{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES256"}}, Expected: true}, // 0
	{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES-256"}}, Expected: true}, // 1
	{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{""}}, Expected: true}, // 2
	{Header: http.Header{"X-Amz-Server-Side-Encryptio": []string{"AES256"}}, Expected: false}, // 3
	{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{SSEAlgorithmKMS}}, Expected: false}, // 4
}

func TestS3IsRequested(t *testing.T) {
@@ -59,7 +95,7 @@ var ssecIsRequestedTests = []struct {
	Header http.Header
	Expected bool
}{
	{Header: http.Header{}, Expected: false}, // 0
	{Header: http.Header{}, Expected: false}, // 0
	{Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"}}, Expected: true}, // 1
	{Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}}, Expected: true}, // 2
	{Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="}}, Expected: true}, // 3
@@ -101,7 +137,7 @@ var ssecCopyIsRequestedTests = []struct {
	Header http.Header
	Expected bool
}{
	{Header: http.Header{}, Expected: false}, // 0
	{Header: http.Header{}, Expected: false}, // 0
	{Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"}}, Expected: true}, // 1
	{Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}}, Expected: true}, // 2
	{Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="}}, Expected: true}, // 3
@@ -215,9 +251,6 @@ func TestSSECParse(t *testing.T) {
		if err == nil && key == zeroKey {
			t.Errorf("Test %d: parsed client key is zero key", i)
		}
		if _, ok := test.Header[SSECKey]; ok {
			t.Errorf("Test %d: client key is not removed from HTTP headers after parsing", i)
		}
	}
}

@@ -302,3 +335,102 @@ func TestSSECopyParse(t *testing.T) {
	}
}

var removeSensitiveHeadersTests = []struct {
	Header, ExpectedHeader http.Header
}{
	{
		Header: http.Header{
			SSECKey: []string{""},
			SSECopyKey: []string{""},
		},
		ExpectedHeader: http.Header{},
	},
	{ // Standard SSE-C request headers
		Header: http.Header{
			SSECAlgorithm: []string{SSEAlgorithmAES256},
			SSECKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
			SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
		},
		ExpectedHeader: http.Header{
			SSECAlgorithm: []string{SSEAlgorithmAES256},
			SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
		},
	},
	{ // Standard SSE-C + SSE-C-copy request headers
		Header: http.Header{
			SSECAlgorithm: []string{SSEAlgorithmAES256},
			SSECKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
			SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
			SSECopyKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
			SSECopyKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
		},
		ExpectedHeader: http.Header{
			SSECAlgorithm: []string{SSEAlgorithmAES256},
			SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
			SSECopyKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
		},
	},
	{ // Standard SSE-C + metadata request headers
		Header: http.Header{
			SSECAlgorithm: []string{SSEAlgorithmAES256},
			SSECKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
			SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
			"X-Amz-Meta-Test-1": []string{"Test-1"},
		},
		ExpectedHeader: http.Header{
			SSECAlgorithm: []string{SSEAlgorithmAES256},
			SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
			"X-Amz-Meta-Test-1": []string{"Test-1"},
		},
	},
}

func TestRemoveSensitiveHeaders(t *testing.T) {
	isEqual := func(x, y http.Header) bool {
		if len(x) != len(y) {
			return false
		}
		for k, v := range x {
			u, ok := y[k]
			if !ok || len(v) != len(u) {
				return false
			}
			sort.Strings(v)
			sort.Strings(u)
			for j := range v {
				if v[j] != u[j] {
					return false
				}
			}
		}
		return true
	}
	areKeysEqual := func(h http.Header, metadata map[string]string) bool {
		if len(h) != len(metadata) {
			return false
		}
		for k := range h {
			if _, ok := metadata[k]; !ok {
				return false
			}
		}
		return true
	}

	for i, test := range removeSensitiveHeadersTests {
		metadata := make(map[string]string, len(test.Header))
		for k := range test.Header {
			metadata[k] = "" // set metadata key - we don't care about the value
		}

		RemoveSensitiveHeaders(test.Header)
		if !isEqual(test.ExpectedHeader, test.Header) {
			t.Errorf("Test %d: filtered headers do not match expected headers - got: %v , want: %v", i, test.Header, test.ExpectedHeader)
		}
		RemoveSensitiveEntries(metadata)
		if !areKeysEqual(test.ExpectedHeader, metadata) {
			t.Errorf("Test %d: filtered headers do not match expected headers - got: %v , want: %v", i, test.Header, test.ExpectedHeader)
		}
	}
}

@@ -35,8 +35,8 @@ import (
type ObjectKey [32]byte

// GenerateKey generates a unique ObjectKey from a 256 bit external key
// and a source of randomness. If random is nil the default PRNG of system
// (crypto/rand) is used.
// and a source of randomness. If random is nil the default PRNG of the
// system (crypto/rand) is used.
func GenerateKey(extKey [32]byte, random io.Reader) (key ObjectKey) {
	if random == nil {
		random = rand.Reader
@@ -52,6 +52,19 @@ func GenerateKey(extKey [32]byte, random io.Reader) (key ObjectKey) {
	return key
}

// GenerateIV generates a new random 256 bit IV from the provided source
// of randomness. If random is nil the default PRNG of the system
// (crypto/rand) is used.
func GenerateIV(random io.Reader) (iv [32]byte) {
	if random == nil {
		random = rand.Reader
	}
	if _, err := io.ReadFull(random, iv[:]); err != nil {
		logger.CriticalIf(context.Background(), errOutOfEntropy)
	}
	return iv
}
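Both generators are deterministic once their inputs are fixed, which is what makes table-driven tests of them possible. A minimal sketch of that property (a hypothetical test, not part of this change; it assumes only GenerateKey's signature above):

func TestGenerateKeyDeterministic(t *testing.T) {
	extKey := [32]byte{0x01}
	// Feeding the same fixed "randomness" twice must derive the same object key.
	k1 := GenerateKey(extKey, bytes.NewReader(make([]byte, 64)))
	k2 := GenerateKey(extKey, bytes.NewReader(make([]byte, 64)))
	if k1 != k2 {
		t.Fatal("same external key and same random source must derive the same object key")
	}
}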

// SealedKey represents a sealed object key. It can be stored
// at an untrusted location.
type SealedKey struct {
@@ -74,7 +87,6 @@ func (key ObjectKey) Seal(extKey, iv [32]byte, domain, bucket, object string) Se
	mac.Write([]byte(SealAlgorithm))
	mac.Write([]byte(path.Join(bucket, object))) // use path.Join for canonical 'bucket/object'
	mac.Sum(sealingKey[:0])

	if n, err := sio.Encrypt(&encryptedKey, bytes.NewReader(key[:]), sio.Config{Key: sealingKey[:]}); n != 64 || err != nil {
		logger.CriticalIf(context.Background(), errors.New("Unable to generate sealed key"))
	}
@@ -112,7 +124,7 @@ func (key *ObjectKey) Unseal(extKey [32]byte, sealedKey SealedKey, domain, bucke
	}

	if n, err := sio.Decrypt(&decryptedKey, bytes.NewReader(sealedKey.Key[:]), unsealConfig); n != 32 || err != nil {
		return err // TODO(aead): upgrade sio to use sio.Error
		return ErrSecretKeyMismatch
	}
	copy(key[:], decryptedKey.Bytes())
	return nil
@@ -128,3 +140,37 @@ func (key ObjectKey) DerivePartKey(id uint32) (partKey [32]byte) {
	mac.Sum(partKey[:0])
	return partKey
}
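DerivePartKey exists so that every part of a multipart upload is encrypted under its own key, all derived from the single object key. A sketch of the intended use (the part loop and count are illustrative, not from this change):

var key ObjectKey // the unsealed object key for this upload
for partID := uint32(0); partID < 3; partID++ {
	partKey := key.DerivePartKey(partID)
	_ = partKey // used as the encryption key for this part's data stream
}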

// SealETag seals the etag using the object key.
// It does not encrypt empty ETags because such ETags indicate
// that the S3 client hasn't sent an ETag = MD5(object) and
// the backend can pick an ETag value.
func (key ObjectKey) SealETag(etag []byte) []byte {
	if len(etag) == 0 { // don't encrypt empty ETag - only if client sent ETag = MD5(object)
		return etag
	}
	var buffer bytes.Buffer
	mac := hmac.New(sha256.New, key[:])
	mac.Write([]byte("SSE-etag"))
	if _, err := sio.Encrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil)}); err != nil {
		logger.CriticalIf(context.Background(), errors.New("Unable to encrypt ETag using object key"))
	}
	return buffer.Bytes()
}

// UnsealETag unseals the etag using the provided object key.
// It does not try to decrypt the ETag if len(etag) == 16
// because such ETags indicate that the S3 client hasn't sent
// an ETag = MD5(object) and the backend has picked an ETag value.
func (key ObjectKey) UnsealETag(etag []byte) ([]byte, error) {
	if !IsETagSealed(etag) {
		return etag, nil
	}
	var buffer bytes.Buffer
	mac := hmac.New(sha256.New, key[:])
	mac.Write([]byte("SSE-etag"))
	if _, err := sio.Decrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil)}); err != nil {
		return nil, err
	}
	return buffer.Bytes(), nil
}
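SealETag and UnsealETag are inverses for any non-empty ETag, with two deliberate pass-through cases: empty ETags are never sealed, and slices of MD5 length or shorter are never unsealed. A hedged round-trip sketch (key and etag are placeholders):

sealed := key.SealETag(etag)         // returns etag unchanged if len(etag) == 0
plain, err := key.UnsealETag(sealed) // returns input unchanged if !IsETagSealed(sealed)
if err == nil && bytes.Equal(plain, etag) {
	// round trip succeeded - this is exactly what TestSealETag below verifies
}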

@@ -61,6 +61,32 @@ func TestGenerateKey(t *testing.T) {
	}
}

var generateIVTests = []struct {
	Random io.Reader
	ShouldPass bool
}{
	{Random: nil, ShouldPass: true}, // 0
	{Random: rand.Reader, ShouldPass: true}, // 1
	{Random: shortRandom(32), ShouldPass: true}, // 2
	{Random: shortRandom(31), ShouldPass: false}, // 3
}

func TestGenerateIV(t *testing.T) {
	defer func(disableLog bool) { logger.Disable = disableLog }(logger.Disable)
	logger.Disable = true

	for i, test := range generateIVTests {
		i, test := i, test
		func() {
			defer recoverTest(i, test.ShouldPass, t)
			iv := GenerateIV(test.Random)
			if iv == [32]byte{} {
				t.Errorf("Test %d: generated IV is zero IV", i) // check that we generate random and unique IV
			}
		}()
	}
}

var sealUnsealKeyTests = []struct {
	SealExtKey, SealIV [32]byte
	SealDomain, SealBucket, SealObject string
@@ -140,3 +166,31 @@ func TestDerivePartKey(t *testing.T) {
		}
	}
}

var sealUnsealETagTests = []string{
	"",
	"90682b8e8cc7609c",
	"90682b8e8cc7609c4671e1d64c73fc30",
	"90682b8e8cc7609c4671e1d64c73fc307fb3104f",
}

func TestSealETag(t *testing.T) {
	var key ObjectKey
	for i := range key {
		key[i] = byte(i)
	}
	for i, etag := range sealUnsealETagTests {
		tag, err := hex.DecodeString(etag)
		if err != nil {
			t.Errorf("Test %d: failed to decode etag: %s", i, err)
		}
		sealedETag := key.SealETag(tag)
		unsealedETag, err := key.UnsealETag(sealedETag)
		if err != nil {
			t.Errorf("Test %d: failed to decrypt etag: %s", i, err)
		}
		if !bytes.Equal(unsealedETag, tag) {
			t.Errorf("Test %d: unsealed etag does not match: got %s - want %s", i, hex.EncodeToString(unsealedETag), etag)
		}
	}
}

@@ -32,6 +32,34 @@ func IsMultiPart(metadata map[string]string) bool {
	return false
}

// RemoveSensitiveEntries removes confidential encryption
// information - e.g. the SSE-C key - from the metadata map.
// It has the same semantics as RemoveSensitiveHeaders.
func RemoveSensitiveEntries(metadata map[string]string) { // The function is tested in TestRemoveSensitiveHeaders for compatibility reasons
	delete(metadata, SSECKey)
	delete(metadata, SSECopyKey)
}

// RemoveSSEHeaders removes all crypto-specific SSE
// header entries from the metadata map.
func RemoveSSEHeaders(metadata map[string]string) {
	delete(metadata, SSEHeader)
	delete(metadata, SSECKeyMD5)
	delete(metadata, SSECAlgorithm)
}

// RemoveInternalEntries removes all crypto-specific internal
// metadata entries from the metadata map.
func RemoveInternalEntries(metadata map[string]string) {
	delete(metadata, SSEMultipart)
	delete(metadata, SSEIV)
	delete(metadata, SSESealAlgorithm)
	delete(metadata, SSECSealedKey)
	delete(metadata, S3SealedKey)
	delete(metadata, S3KMSKeyID)
	delete(metadata, S3KMSSealedKey)
}

// IsEncrypted returns true if the object metadata indicates
// that it was uploaded using some form of server-side-encryption.
//
@@ -211,3 +239,6 @@ func (ssec) ParseMetadata(metadata map[string]string) (sealedKey SealedKey, err
	copy(sealedKey.Key[:], encryptedKey)
	return sealedKey, nil
}

// IsETagSealed returns true if the etag seems to be encrypted.
func IsETagSealed(etag []byte) bool { return len(etag) > 16 }
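The 16-byte cutoff works because an unsealed S3 ETag is an MD5 digest (16 bytes, or a shorter fragment), while sealing adds the DARE package overhead on top of the plaintext; the 48-byte sample in the tests below is a sealed 16-byte ETag. A quick illustration:

IsETagSealed(make([]byte, 16)) // false - could be a plain MD5 ETag
IsETagSealed(make([]byte, 17)) // true - longer than any unsealed ETag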

@@ -17,6 +17,7 @@ package crypto
import (
	"bytes"
	"encoding/base64"
	"encoding/hex"
	"testing"

	"github.com/minio/minio/cmd/logger"
@@ -326,15 +327,20 @@ func TestS3CreateMetadata(t *testing.T) {
	_ = S3.CreateMetadata(nil, "", []byte{}, SealedKey{Algorithm: InsecureSealAlgorithm})
}

var ssecCreateMetadataTests = []SealedKey{
	{Algorithm: SealAlgorithm},
	{IV: [32]byte{0xff}, Key: [64]byte{0x7e}, Algorithm: SealAlgorithm},
var ssecCreateMetadataTests = []struct {
	KeyID string
	SealedDataKey []byte
	SealedKey SealedKey
}{
	{KeyID: "", SealedDataKey: make([]byte, 48), SealedKey: SealedKey{Algorithm: SealAlgorithm}},
	{KeyID: "cafebabe", SealedDataKey: make([]byte, 48), SealedKey: SealedKey{Algorithm: SealAlgorithm}},
	{KeyID: "deadbeef", SealedDataKey: make([]byte, 32), SealedKey: SealedKey{IV: [32]byte{0xf7}, Key: [64]byte{0xea}, Algorithm: SealAlgorithm}},
}

func TestSSECCreateMetadata(t *testing.T) {
	defer func(disableLog bool) { logger.Disable = disableLog }(logger.Disable)
	logger.Disable = true
	for i, test := range s3CreateMetadataTests {
	for i, test := range ssecCreateMetadataTests {
		metadata := SSEC.CreateMetadata(nil, test.SealedKey)
		sealedKey, err := SSEC.ParseMetadata(metadata)
		if err != nil {
@@ -359,3 +365,75 @@ func TestSSECCreateMetadata(t *testing.T) {
	}()
	_ = SSEC.CreateMetadata(nil, SealedKey{Algorithm: InsecureSealAlgorithm})
}

var isETagSealedTests = []struct {
	ETag string
	IsSealed bool
}{
	{ETag: "", IsSealed: false}, // 0
	{ETag: "90682b8e8cc7609c4671e1d64c73fc30", IsSealed: false}, // 1
	{ETag: "f201040c9dc593e39ea004dc1323699bcd", IsSealed: true}, // 2 not valid ciphertext but looks like sealed ETag
	{ETag: "20000f00fba2ee2ae4845f725964eeb9e092edfabc7ab9f9239e8344341f769a51ce99b4801b0699b92b16a72fa94972", IsSealed: true}, // 3
}

func TestIsETagSealed(t *testing.T) {
	for i, test := range isETagSealedTests {
		etag, err := hex.DecodeString(test.ETag)
		if err != nil {
			t.Errorf("Test %d: failed to decode etag: %s", i, err)
		}
		if sealed := IsETagSealed(etag); sealed != test.IsSealed {
			t.Errorf("Test %d: got %v - want %v", i, sealed, test.IsSealed)
		}
	}
}

var removeInternalEntriesTests = []struct {
	Metadata, Expected map[string]string
}{
	{ // 0
		Metadata: map[string]string{
			SSEMultipart: "",
			SSEIV: "",
			SSESealAlgorithm: "",
			SSECSealedKey: "",
			S3SealedKey: "",
			S3KMSKeyID: "",
			S3KMSSealedKey: "",
		},
		Expected: map[string]string{},
	},
	{ // 1
		Metadata: map[string]string{
			SSEMultipart: "",
			SSEIV: "",
			"X-Amz-Meta-A": "X",
			"X-Minio-Internal-B": "Y",
		},
		Expected: map[string]string{
			"X-Amz-Meta-A": "X",
			"X-Minio-Internal-B": "Y",
		},
	},
}

func TestRemoveInternalEntries(t *testing.T) {
	isEqual := func(x, y map[string]string) bool {
		if len(x) != len(y) {
			return false
		}
		for k, v := range x {
			if u, ok := y[k]; !ok || v != u {
				return false
			}
		}
		return true
	}

	for i, test := range removeInternalEntriesTests {
		RemoveInternalEntries(test.Metadata)
		if !isEqual(test.Metadata, test.Expected) {
			t.Errorf("Test %d: got %v - want %v", i, test.Metadata, test.Expected)
		}
	}
}

@@ -18,6 +18,8 @@ import (
	"context"
	"errors"
	"io"
	"net/http"
	"path"

	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/ioutil"
@@ -65,6 +67,63 @@ const (
	InsecureSealAlgorithm = "DARE-SHA256"
)

// String returns the SSE domain as string. For SSE-S3 the
// domain is "SSE-S3".
func (s3) String() string { return "SSE-S3" }

// UnsealObjectKey extracts and decrypts the sealed object key
// from the metadata using KMS and returns the decrypted object
// key.
func (sse s3) UnsealObjectKey(kms KMS, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
	keyID, kmsKey, sealedKey, err := sse.ParseMetadata(metadata)
	if err != nil {
		return
	}
	unsealKey, err := kms.UnsealKey(keyID, kmsKey, Context{bucket: path.Join(bucket, object)})
	if err != nil {
		return
	}
	err = key.Unseal(unsealKey, sealedKey, sse.String(), bucket, object)
	return
}

// String returns the SSE domain as string. For SSE-C the
// domain is "SSE-C".
func (ssec) String() string { return "SSE-C" }

// UnsealObjectKey extracts and decrypts the sealed object key
// from the metadata using the SSE-C client key of the HTTP headers
// and returns the decrypted object key.
func (sse ssec) UnsealObjectKey(h http.Header, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
	clientKey, err := sse.ParseHTTP(h)
	if err != nil {
		return
	}
	return unsealObjectKey(clientKey, metadata, bucket, object)
}

// UnsealObjectKey extracts and decrypts the sealed object key
// from the metadata using the SSE-Copy client key of the HTTP headers
// and returns the decrypted object key.
func (sse ssecCopy) UnsealObjectKey(h http.Header, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
	clientKey, err := sse.ParseHTTP(h)
	if err != nil {
		return
	}
	return unsealObjectKey(clientKey, metadata, bucket, object)
}

// unsealObjectKey decrypts and returns the sealed object key
// from the metadata using the SSE-C client key.
func unsealObjectKey(clientKey [32]byte, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
	sealedKey, err := SSEC.ParseMetadata(metadata)
	if err != nil {
		return
	}
	err = key.Unseal(clientKey, sealedKey, SSEC.String(), bucket, object)
	return
}
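Together these helpers take a request from HTTP headers plus stored object metadata to a usable object key in one call. A hedged sketch of how a GET handler might wire them up (the surrounding function and the IsRequested check are assumptions, not part of this diff):

func objectKeyForGET(h http.Header, metadata map[string]string, bucket, object string) (ObjectKey, error) {
	if SSEC.IsRequested(h) { // assumes the package's SSE-C request detection helper
		return SSEC.UnsealObjectKey(h, metadata, bucket, object)
	}
	return ObjectKey{}, errors.New("request carries no supported SSE-C headers") // illustrative fallback
}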

// EncryptSinglePart encrypts an io.Reader which must be
// the body of a single-part PUT request.
func EncryptSinglePart(r io.Reader, key ObjectKey) io.Reader {

226 cmd/crypto/sse_test.go Normal file
@@ -0,0 +1,226 @@
// Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package crypto

import (
	"net/http"
	"testing"
)

func TestS3String(t *testing.T) {
	const Domain = "SSE-S3"
	if domain := S3.String(); domain != Domain {
		t.Errorf("S3's string method returns wrong domain: got '%s' - want '%s'", domain, Domain)
	}
}

func TestSSECString(t *testing.T) {
	const Domain = "SSE-C"
	if domain := SSEC.String(); domain != Domain {
		t.Errorf("SSEC's string method returns wrong domain: got '%s' - want '%s'", domain, Domain)
	}
}

var ssecUnsealObjectKeyTests = []struct {
	Headers http.Header
	Bucket, Object string
	Metadata map[string]string

	ExpectedErr error
}{
	{ // 0 - Valid HTTP headers and valid metadata entries for bucket/object
		Headers: http.Header{
			"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
			"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
			"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
		},
		Bucket: "bucket",
		Object: "object",
		Metadata: map[string]string{
			"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
			"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
			"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
		},
		ExpectedErr: nil,
	},
	{ // 1 - Valid HTTP headers but invalid metadata entries for bucket/object2
		Headers: http.Header{
			"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
			"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
			"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
		},
		Bucket: "bucket",
		Object: "object2",
		Metadata: map[string]string{
			"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
			"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
			"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
		},
		ExpectedErr: ErrSecretKeyMismatch,
	},
	{ // 2 - Valid HTTP headers but invalid metadata entries for bucket/object
		Headers: http.Header{
			"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
			"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
			"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
		},
		Bucket: "bucket",
		Object: "object",
		Metadata: map[string]string{
			"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
			"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
		},
		ExpectedErr: errMissingInternalSealAlgorithm,
	},
	{ // 3 - Invalid HTTP headers for valid metadata entries for bucket/object
		Headers: http.Header{
			"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
			"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
		},
		Bucket: "bucket",
		Object: "object",
		Metadata: map[string]string{
			"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
			"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
			"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
		},
		ExpectedErr: ErrMissingCustomerKeyMD5,
	},
}

func TestSSECUnsealObjectKey(t *testing.T) {
	for i, test := range ssecUnsealObjectKeyTests {
		if _, err := SSEC.UnsealObjectKey(test.Headers, test.Metadata, test.Bucket, test.Object); err != test.ExpectedErr {
			t.Errorf("Test %d: got: %v - want: %v", i, err, test.ExpectedErr)
		}
	}
}

var sseCopyUnsealObjectKeyTests = []struct {
	Headers http.Header
	Bucket, Object string
	Metadata map[string]string

	ExpectedErr error
}{
	{ // 0 - Valid HTTP headers and valid metadata entries for bucket/object
		Headers: http.Header{
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
		},
		Bucket: "bucket",
		Object: "object",
		Metadata: map[string]string{
			"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
			"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
			"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
		},
		ExpectedErr: nil,
	},
	{ // 1 - Valid HTTP headers but invalid metadata entries for bucket/object2
		Headers: http.Header{
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
		},
		Bucket: "bucket",
		Object: "object2",
		Metadata: map[string]string{
			"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
			"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
			"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
		},
		ExpectedErr: ErrSecretKeyMismatch,
	},
	{ // 2 - Valid HTTP headers but invalid metadata entries for bucket/object
		Headers: http.Header{
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
		},
		Bucket: "bucket",
		Object: "object",
		Metadata: map[string]string{
			"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
			"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
		},
		ExpectedErr: errMissingInternalSealAlgorithm,
	},
	{ // 3 - Invalid HTTP headers for valid metadata entries for bucket/object
		Headers: http.Header{
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
		},
		Bucket: "bucket",
		Object: "object",
		Metadata: map[string]string{
			"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
			"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
			"X-Minio-Internal-Server-Side-Encryption-Iv": "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
		},
		ExpectedErr: ErrMissingCustomerKeyMD5,
	},
}

func TestSSECopyUnsealObjectKey(t *testing.T) {
	for i, test := range sseCopyUnsealObjectKeyTests {
		if _, err := SSECopy.UnsealObjectKey(test.Headers, test.Metadata, test.Bucket, test.Object); err != test.ExpectedErr {
			t.Errorf("Test %d: got: %v - want: %v", i, err, test.ExpectedErr)
		}
	}
}

var s3UnsealObjectKeyTests = []struct {
	KMS KMS
	Bucket, Object string
	Metadata map[string]string

	ExpectedErr error
}{
	{ // 0 - Valid KMS key-ID and valid metadata entries for bucket/object
		KMS: NewKMS([32]byte{}),
		Bucket: "bucket",
		Object: "object",
		Metadata: map[string]string{
			"X-Minio-Internal-Server-Side-Encryption-Iv": "hhVY0LKR1YtZbzAKxTWUfZt5enDfYX6Fxz1ma8Kiudc=",
			"X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key": "IAAfALhsOeD5AE3s5Zgq3DZ5VFGsOa3B0ksVC86veDcaj+fXv2U0VadhPaOKYr9Emd5ssOsO0uIhIIrKiOy9rA==",
			"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key": "IAAfAMRS2iw45FsfiF3QXajSYVWj1lxMpQm6DxDGPtADCX6fJQQ4atHBtfpgqJFyeQmIHsm0FBI+UlHw1Lv4ug==",
			"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id": "test-key-1",
			"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
		},
		ExpectedErr: nil,
	},
	{ // 1 - Valid KMS key-ID for invalid metadata entries for bucket/object
		KMS: NewKMS([32]byte{}),
		Bucket: "bucket",
		Object: "object",
		Metadata: map[string]string{
			"X-Minio-Internal-Server-Side-Encryption-Iv": "hhVY0LKR1YtZbzAKxTWUfZt5enDfYX6Fxz1ma8Kiudc=",
			"X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key": "IAAfALhsOeD5AE3s5Zgq3DZ5VFGsOa3B0ksVC86veDcaj+fXv2U0VadhPaOKYr9Emd5ssOsO0uIhIIrKiOy9rA==",
			"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key": "IAAfAMRS2iw45FsfiF3QXajSYVWj1lxMpQm6DxDGPtADCX6fJQQ4atHBtfpgqJFyeQmIHsm0FBI+UlHw1Lv4ug==",
			"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id": "test-key-1",
		},
		ExpectedErr: errMissingInternalSealAlgorithm,
	},
}

func TestS3UnsealObjectKey(t *testing.T) {
	for i, test := range s3UnsealObjectKeyTests {
		if _, err := S3.UnsealObjectKey(test.KMS, test.Metadata, test.Bucket, test.Object); err != test.ExpectedErr {
			t.Errorf("Test %d: got: %v - want: %v", i, err, test.ExpectedErr)
		}
	}
}

252 cmd/crypto/vault.go Normal file
@@ -0,0 +1,252 @@
// Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package crypto

import (
	"bytes"
	"encoding/base64"
	"errors"
	"fmt"
	"strings"
	"time"

	vault "github.com/hashicorp/vault/api"
)

var (
	// ErrKMSAuthLogin is raised when there is a failure authenticating to KMS
	ErrKMSAuthLogin = errors.New("Vault service did not return auth info")
)

// VaultKey represents vault encryption key-ring.
type VaultKey struct {
	Name string `json:"name"` // The name of the encryption key-ring
	Version int `json:"version"` // The key version
}

// VaultAuth represents vault authentication type.
// Currently the only supported authentication type is AppRole.
type VaultAuth struct {
	Type string `json:"type"` // The authentication type
	AppRole VaultAppRole `json:"approle"` // The AppRole authentication credentials
}

// VaultAppRole represents vault AppRole authentication credentials
type VaultAppRole struct {
	ID string `json:"id"` // The AppRole access ID
	Secret string `json:"secret"` // The AppRole secret
}

// VaultConfig represents vault configuration.
type VaultConfig struct {
	Endpoint string `json:"endpoint"` // The vault API endpoint as URL
	CAPath string `json:"-"` // The path to PEM-encoded certificate files used for mTLS. Currently not used in config file.
	Auth VaultAuth `json:"auth"` // The vault authentication configuration
	Key VaultKey `json:"key-id"` // The named key used for key-generation / decryption.
	Namespace string `json:"-"` // The vault namespace of enterprise vault instances
}

// vaultService represents a connection to a vault KMS.
type vaultService struct {
	config *VaultConfig
	client *vault.Client
	secret *vault.Secret
	leaseDuration time.Duration
}

var _ KMS = (*vaultService)(nil) // compiler check that *vaultService implements KMS

// empty/default vault configuration used to check whether a
// particular configuration is empty.
var emptyVaultConfig = VaultConfig{}

// IsEmpty returns true if the vault config struct is an
// empty configuration.
func (v *VaultConfig) IsEmpty() bool { return *v == emptyVaultConfig }

// Verify returns a nil error if the vault configuration
// is valid. A valid configuration is either empty or
// contains valid non-default values.
func (v *VaultConfig) Verify() (err error) {
	if v.IsEmpty() {
		return // an empty configuration is valid
	}
	switch {
	case v.Endpoint == "":
		err = errors.New("crypto: missing hashicorp vault endpoint")
	case strings.ToLower(v.Auth.Type) != "approle":
		err = fmt.Errorf("crypto: invalid hashicorp vault authentication type: %s is not supported", v.Auth.Type)
	case v.Auth.AppRole.ID == "":
		err = errors.New("crypto: missing hashicorp vault AppRole ID")
	case v.Auth.AppRole.Secret == "":
		err = errors.New("crypto: missing hashicorp vault AppSecret ID")
	case v.Key.Name == "":
		err = errors.New("crypto: missing hashicorp vault key name")
	case v.Key.Version < 0:
		err = errors.New("crypto: invalid hashicorp vault key version: The key version must not be negative")
	}
	return
}
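A configuration that passes Verify needs at least an endpoint, AppRole credentials, and a key name; everything else may stay at its zero value. A sketch with placeholder values:

cfg := VaultConfig{
	Endpoint: "https://vault.example.com:8200", // placeholder endpoint
	Auth: VaultAuth{
		Type: "approle",
		AppRole: VaultAppRole{ID: "role-id", Secret: "secret-id"},
	},
	Key: VaultKey{Name: "minio-sse-key", Version: 0},
}
if err := cfg.Verify(); err != nil {
	// reject the configuration before dialing Vault
}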

// NewVault initializes Hashicorp Vault KMS by authenticating
// to Vault with the credentials in config and gets a client
// token for future api calls.
func NewVault(config VaultConfig) (KMS, error) {
	if config.IsEmpty() {
		return nil, errors.New("crypto: the hashicorp vault configuration must not be empty")
	}
	if err := config.Verify(); err != nil {
		return nil, err
	}

	vaultCfg := vault.Config{Address: config.Endpoint}
	if err := vaultCfg.ConfigureTLS(&vault.TLSConfig{CAPath: config.CAPath}); err != nil {
		return nil, err
	}
	client, err := vault.NewClient(&vaultCfg)
	if err != nil {
		return nil, err
	}
	if config.Namespace != "" {
		client.SetNamespace(config.Namespace)
	}
	v := &vaultService{client: client, config: &config}
	if err := v.authenticate(); err != nil {
		return nil, err
	}
	v.renewToken()
	return v, nil
}

// renewToken starts a new go-routine which renews
// the vault authentication token periodically and re-authenticates
// if the token renewal fails
func (v *vaultService) renewToken() {
	retryDelay := v.leaseDuration / 2
	go func() {
		for {
			if v.secret == nil {
				if err := v.authenticate(); err != nil {
					time.Sleep(retryDelay)
					continue
				}
			}
			s, err := v.client.Auth().Token().RenewSelf(int(v.leaseDuration))
			if err != nil || s == nil {
				v.secret = nil
				time.Sleep(retryDelay)
				continue
			}
			if ok, err := s.TokenIsRenewable(); !ok || err != nil {
				v.secret = nil
				continue
			}
			ttl, err := s.TokenTTL()
			if err != nil {
				v.secret = nil
				continue
			}
			v.secret = s
			retryDelay = ttl / 2
			time.Sleep(retryDelay)
		}
	}()
}

// authenticate logs the app to vault, and starts the auto renewer
// before secret expires
func (v *vaultService) authenticate() (err error) {
	payload := map[string]interface{}{
		"role_id": v.config.Auth.AppRole.ID,
		"secret_id": v.config.Auth.AppRole.Secret,
	}
	var tokenID string
	var ttl time.Duration
	var secret *vault.Secret
	secret, err = v.client.Logical().Write("auth/approle/login", payload)
	if err != nil {
		return
	}
	if secret == nil {
		err = ErrKMSAuthLogin
		return
	}

	tokenID, err = secret.TokenID()
	if err != nil {
		err = ErrKMSAuthLogin
		return
	}
	ttl, err = secret.TokenTTL()
	if err != nil {
		err = ErrKMSAuthLogin
		return
	}
	v.client.SetToken(tokenID)
	v.secret = secret
	v.leaseDuration = ttl
	return
}

// GenerateKey returns a new plaintext key, generated by the KMS,
// and a sealed version of this plaintext key encrypted using the
// named key referenced by keyID. It also binds the generated key
// cryptographically to the provided context.
func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
	var contextStream bytes.Buffer
	ctx.WriteTo(&contextStream)

	payload := map[string]interface{}{
		"context": base64.StdEncoding.EncodeToString(contextStream.Bytes()),
	}
	s, err := v.client.Logical().Write(fmt.Sprintf("/transit/datakey/plaintext/%s", keyID), payload)
	if err != nil {
		return key, sealedKey, err
	}
	sealKey := s.Data["ciphertext"].(string)
	plainKey, err := base64.StdEncoding.DecodeString(s.Data["plaintext"].(string))
	if err != nil {
		return key, sealedKey, err
	}
	copy(key[:], []byte(plainKey))
	return key, []byte(sealKey), nil
}

// UnsealKey returns the decrypted sealedKey as plaintext key.
// Therefore it sends the sealedKey to the KMS which decrypts
// it using the named key referenced by keyID and responds with
// the plaintext key.
//
// The context must be the same context as the one provided while
// generating the plaintext key / sealedKey.
func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
	var contextStream bytes.Buffer
	ctx.WriteTo(&contextStream)

	payload := map[string]interface{}{
		"ciphertext": string(sealedKey),
		"context": base64.StdEncoding.EncodeToString(contextStream.Bytes()),
	}
	s, err := v.client.Logical().Write(fmt.Sprintf("/transit/decrypt/%s", keyID), payload)
	if err != nil {
		return key, err
	}
	base64Key := s.Data["plaintext"].(string)
	plainKey, err := base64.StdEncoding.DecodeString(base64Key)
	if err != nil {
		return key, err
	}
	copy(key[:], []byte(plainKey))
	return key, nil
}
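GenerateKey and UnsealKey are inverses as long as the same keyID and Context are used, which is how a sealed key stored in object metadata gets back to plaintext. A hedged round-trip sketch against the KMS interface (key name and context values are placeholders):

kms, err := NewVault(cfg) // cfg as sketched above
if err != nil {
	return err
}
kctx := Context{"bucket": "bucket/object"}
plain, sealed, err := kms.GenerateKey("minio-sse-key", kctx)
if err != nil {
	return err
}
unsealed, err := kms.UnsealKey("minio-sse-key", sealed, kctx)
// err == nil and unsealed == plain only when kctx matches the context
// used at generation time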

@@ -30,7 +30,6 @@ import (

	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/disk"
	"github.com/minio/minio/pkg/hash"
	"github.com/minio/minio/pkg/lock"
)

@@ -92,7 +91,7 @@ func newCacheFSObjects(dir string, expiry int, maxDiskUsagePct int) (*cacheFSObj
		appendFileMap: make(map[string]*fsAppendFile),
	}

	go fsObjects.cleanupStaleMultipartUploads(context.Background(), globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
	go fsObjects.cleanupStaleMultipartUploads(context.Background(), GlobalMultipartCleanupInterval, GlobalMultipartExpiry, GlobalServiceDoneCh)

	cacheFS := cacheFSObjects{
		FSObjects: fsObjects,
@@ -159,7 +158,7 @@ func (cfs *cacheFSObjects) purgeTrash() {

	for {
		select {
		case <-globalServiceDoneCh:
		case <-GlobalServiceDoneCh:
			return
		case <-ticker.C:
			trashPath := path.Join(cfs.fsPath, minioMetaBucket, cacheTrashDir)
@@ -258,7 +257,7 @@ func (cfs *cacheFSObjects) IsOnline() bool {
}

// Caches the object to disk
func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) error {
func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) error {
	if cfs.diskUsageHigh() {
		select {
		case cfs.purgeChan <- struct{}{}:
@@ -275,7 +274,7 @@ func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data
			return pErr
		}
	}
	_, err := cfs.PutObject(ctx, bucket, object, data, metadata)
	_, err := cfs.PutObject(ctx, bucket, object, data, opts)
	// if err is due to disk being offline, mark cache drive as offline
	if IsErr(err, baseErrs...) {
		cfs.setOnline(false)
@@ -284,8 +283,8 @@ func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data
}

// Returns the handle for the cached object
func (cfs *cacheFSObjects) Get(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error) {
	return cfs.GetObject(ctx, bucket, object, startOffset, length, writer, etag)
func (cfs *cacheFSObjects) Get(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
	return cfs.GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
}

// Deletes the cached object
@@ -295,13 +294,14 @@ func (cfs *cacheFSObjects) Delete(ctx context.Context, bucket, object string) (e

// convenience function to check if object is cached on this cacheFSObjects
func (cfs *cacheFSObjects) Exists(ctx context.Context, bucket, object string) bool {
	_, err := cfs.GetObjectInfo(ctx, bucket, object)
	_, err := cfs.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
	return err == nil
}

// Identical to fs PutObject operation except that it uses ETag in metadata
// headers.
func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
	data := r.Reader
	fs := cfs.FSObjects
	// Lock the object.
	objectLock := fs.nsMutex.NewNSLock(bucket, object)
@@ -312,7 +312,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object

	// No metadata is set, allocate a new one.
	meta := make(map[string]string)
	for k, v := range metadata {
	for k, v := range opts.UserDefined {
		meta[k] = v
	}

@@ -354,7 +354,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
	}

	// Validate input data size and it can never be less than zero.
	if data.Size() < 0 {
	if data.Size() < -1 {
		logger.LogIf(ctx, errInvalidArgument)
		return ObjectInfo{}, errInvalidArgument
	}
@@ -438,7 +438,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
// Implements S3 compatible initiate multipart API. Operation here is identical
// to fs backend implementation - with the exception that cache FS uses the uploadID
// generated on the backend
func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string, uploadID string) (string, error) {
func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, uploadID string, opts ObjectOptions) (string, error) {
	if cfs.diskUsageHigh() {
		select {
		case cfs.purgeChan <- struct{}{}:
@@ -472,7 +472,7 @@ func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, objec

	// Initialize fs.json values.
	fsMeta := newFSMetaV1()
	fsMeta.Meta = meta
	fsMeta.Meta = opts.UserDefined

	fsMetaBytes, err := json.Marshal(fsMeta)
	if err != nil {

@@ -31,14 +31,14 @@ import (

	"github.com/djherbis/atime"

	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/hash"
	"github.com/minio/minio/pkg/wildcard"
)

const (
	// disk cache needs to have cacheSizeMultiplier * object size space free for a cache entry to be created.
	cacheSizeMultiplier = 100
	// disk cache needs to have object size space free for a cache entry to be created.
	cacheTrashDir = "trash"
	cacheCleanupInterval = 10 // in minutes
)
@@ -57,18 +57,19 @@ type cacheObjects struct {
	// file path patterns to exclude from cache
	exclude []string
	// Object functions pointing to the corresponding functions of backend implementation.
	GetObjectFn func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error)
	GetObjectInfoFn func(ctx context.Context, bucket, object string) (objInfo ObjectInfo, err error)
	PutObjectFn func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error)
	GetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
	GetObjectFn func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
	GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
	PutObjectFn func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
	DeleteObjectFn func(ctx context.Context, bucket, object string) error
	ListObjectsFn func(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
	ListObjectsV2Fn func(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error)
	ListBucketsFn func(ctx context.Context) (buckets []BucketInfo, err error)
	GetBucketInfoFn func(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error)
	NewMultipartUploadFn func(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error)
	PutObjectPartFn func(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error)
	NewMultipartUploadFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error)
	PutObjectPartFn func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
	AbortMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string) error
	CompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)
	CompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
	DeleteBucketFn func(ctx context.Context, bucket string) error
}

@@ -88,21 +89,27 @@ type CacheObjectLayer interface {
	ListBuckets(ctx context.Context) (buckets []BucketInfo, err error)
	DeleteBucket(ctx context.Context, bucket string) error
	// Object operations.
	GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error)
	GetObjectInfo(ctx context.Context, bucket, object string) (objInfo ObjectInfo, err error)
	PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error)
	GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
	GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
	GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
	PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
	DeleteObject(ctx context.Context, bucket, object string) error

	// Multipart operations.
	NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error)
	PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error)
	NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error)
	PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
	AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error
	CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)
	CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)

	// Storage operations.
	StorageInfo(ctx context.Context) CacheStorageInfo
}

// IsCacheable returns if the object should be saved in the cache.
func (o ObjectInfo) IsCacheable() bool {
	return !crypto.IsEncrypted(o.UserDefined)
}

// backendDownError returns true if err is due to backend failure or faulty disk if in server mode
func backendDownError(err error) bool {
	_, backendDown := err.(BackendDown)
@@ -175,22 +182,98 @@ func (c cacheObjects) getMetadata(objInfo ObjectInfo) map[string]string {
	return metadata
}

func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	if c.isCacheExclude(bucket, object) {
		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
	}

	// fetch cacheFSObjects if object is currently cached or nearest available cache drive
	dcache, err := c.cache.getCachedFSLoc(ctx, bucket, object)
	if err != nil {
		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
	}

	cacheReader, cacheErr := dcache.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)

	objInfo, err := c.GetObjectInfoFn(ctx, bucket, object, opts)
	if backendDownError(err) && cacheErr == nil {
		return cacheReader, nil
	} else if err != nil {
		if _, ok := err.(ObjectNotFound); ok {
			// Delete cached entry if backend object was deleted.
			dcache.Delete(ctx, bucket, object)
		}
		return nil, err
	}

	if !objInfo.IsCacheable() || filterFromCache(objInfo.UserDefined) {
		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
	}

	if cacheErr == nil {
		if cacheReader.ObjInfo.ETag == objInfo.ETag && !isStaleCache(objInfo) {
			// Object is not stale, so serve from cache
			return cacheReader, nil
		}
		cacheReader.Close()
		// Object is stale, so delete from cache
		dcache.Delete(ctx, bucket, object)
	}

	// Since we got here, we are serving the request from backend,
	// and also adding the object to the cache.

	if rs != nil {
		// We don't cache partial objects.
		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
	}
	if !dcache.diskAvailable(objInfo.Size) {
		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
	}

	bkReader, bkErr := c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
	if bkErr != nil {
		return nil, bkErr
	}

	// Initialize pipe.
	pipeReader, pipeWriter := io.Pipe()
	teeReader := io.TeeReader(bkReader, pipeWriter)
	hashReader, herr := hash.NewReader(pipeReader, bkReader.ObjInfo.Size, "", "", bkReader.ObjInfo.Size)
	if herr != nil {
		bkReader.Close()
		return nil, herr
	}

	go func() {
		putErr := dcache.Put(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), ObjectOptions{UserDefined: c.getMetadata(bkReader.ObjInfo)})
		// close the write end of the pipe, so the error gets
		// propagated to getObjReader
		pipeWriter.CloseWithError(putErr)
	}()

	cleanupBackend := func() { bkReader.Close() }
	cleanupPipe := func() { pipeReader.Close() }
	gr = NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, cleanupBackend, cleanupPipe)
	return gr, nil
}
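The wiring above - tee the backend stream into a pipe that feeds the cache and hand the tee to the caller - keeps cache writes off the response path: the cache fills as the client reads, and a cache failure only surfaces through the pipe. A stripped-down sketch of the same pattern (names are illustrative, not from this diff):

// cacheTee mirrors the GetObjectNInfo wiring in miniature: backend is the
// object stream, cache receives a copy, size is the exact object size.
func cacheTee(backend io.Reader, cache io.Writer, size int64) io.Reader {
	pr, pw := io.Pipe()
	go func() {
		// dcache.Put reads exactly size bytes in the real code, so CopyN
		// stands in for it here and returns without waiting for pipe EOF.
		_, err := io.CopyN(cache, pr, size)
		pw.CloseWithError(err) // propagate a cache failure to readers of the tee
	}()
	return io.TeeReader(backend, pw)
}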

// Uses cached-object to serve the request. If object is not cached it serves the request from the backend and also
// stores it in the cache for serving subsequent requests.
func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error) {
func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
	GetObjectFn := c.GetObjectFn
	GetObjectInfoFn := c.GetObjectInfoFn

	if c.isCacheExclude(bucket, object) {
		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
	}
	// fetch cacheFSObjects if object is currently cached or nearest available cache drive
	dcache, err := c.cache.getCachedFSLoc(ctx, bucket, object)
	if err != nil {
		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
	}
	// stat object on backend
	objInfo, err := GetObjectInfoFn(ctx, bucket, object)
	objInfo, err := GetObjectInfoFn(ctx, bucket, object, opts)
	backendDown := backendDownError(err)
	if err != nil && !backendDown {
		if _, ok := err.(ObjectNotFound); ok {
@@ -200,43 +283,45 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
		return err
	}

	if !backendDown && filterFromCache(objInfo.UserDefined) {
		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
	if !backendDown && !objInfo.IsCacheable() {
		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
	}

	cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object)
	if !backendDown && filterFromCache(objInfo.UserDefined) {
		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
	}

	cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object, opts)
	if err == nil {
		if backendDown {
			// If the backend is down, serve the request from cache.
			return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag)
			return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag, opts)
		}
		if cachedObjInfo.ETag == objInfo.ETag && !isStaleCache(objInfo) {
			return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag)
			return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag, opts)
		}
		dcache.Delete(ctx, bucket, object)
	}
	if startOffset != 0 || length != objInfo.Size {
	if startOffset != 0 || (length > 0 && length != objInfo.Size) {
		// We don't cache partial objects.
		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
	}
	if !dcache.diskAvailable(objInfo.Size * cacheSizeMultiplier) {
		// cache only objects < 1/100th of disk capacity
		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
	if !dcache.diskAvailable(objInfo.Size) {
		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
	}
	// Initialize pipe.
	pipeReader, pipeWriter := io.Pipe()
	hashReader, err := hash.NewReader(pipeReader, objInfo.Size, "", "")
	hashReader, err := hash.NewReader(pipeReader, objInfo.Size, "", "", objInfo.Size)
	if err != nil {
		return err
	}
	go func() {
		if err = GetObjectFn(ctx, bucket, object, 0, objInfo.Size, io.MultiWriter(writer, pipeWriter), etag); err != nil {
			pipeWriter.CloseWithError(err)
			return
		}
		pipeWriter.Close() // Close writer explicitly signaling we wrote all data.
		gerr := GetObjectFn(ctx, bucket, object, 0, objInfo.Size, io.MultiWriter(writer, pipeWriter), etag, opts)
		pipeWriter.CloseWithError(gerr) // Close writer explicitly signaling we wrote all data.
	}()
	err = dcache.Put(ctx, bucket, object, hashReader, c.getMetadata(objInfo))

	opts.UserDefined = c.getMetadata(objInfo)
	err = dcache.Put(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), opts)
	if err != nil {
		return err
	}
@@ -245,17 +330,17 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
}

// Returns ObjectInfo from cache if available.
func (c cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string) (ObjectInfo, error) {
func (c cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	getObjectInfoFn := c.GetObjectInfoFn
	if c.isCacheExclude(bucket, object) {
		return getObjectInfoFn(ctx, bucket, object)
		return getObjectInfoFn(ctx, bucket, object, opts)
	}
	// fetch cacheFSObjects if object is currently cached or nearest available cache drive
	dcache, err := c.cache.getCachedFSLoc(ctx, bucket, object)
	if err != nil {
		return getObjectInfoFn(ctx, bucket, object)
		return getObjectInfoFn(ctx, bucket, object, opts)
	}
	objInfo, err := getObjectInfoFn(ctx, bucket, object)
	objInfo, err := getObjectInfoFn(ctx, bucket, object, opts)
	if err != nil {
		if _, ok := err.(ObjectNotFound); ok {
			// Delete the cached entry if backend object was deleted.
@@ -266,14 +351,14 @@ func (c cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string)
			return ObjectInfo{}, err
		}
		// when backend is down, serve from cache.
		cachedObjInfo, cerr := dcache.GetObjectInfo(ctx, bucket, object)
		cachedObjInfo, cerr := dcache.GetObjectInfo(ctx, bucket, object, opts)
		if cerr == nil {
			return cachedObjInfo, nil
		}
		return ObjectInfo{}, BackendDown{}
	}
	// when backend is up, do a sanity check on cached object
	cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object)
	cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object, opts)
	if err != nil {
		return objInfo, nil
	}
@@ -560,42 +645,43 @@ func (c cacheObjects) isCacheExclude(bucket, object string) bool {
}

// PutObject - caches the uploaded object for single Put operations
func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
putObjectFn := c.PutObjectFn
data := r.Reader
dcache, err := c.cache.getCacheFS(ctx, bucket, object)
if err != nil {
// disk cache could not be located,execute backend call.
return putObjectFn(ctx, bucket, object, r, metadata)
return putObjectFn(ctx, bucket, object, r, opts)
}
size := r.Size()

// fetch from backend if there is no space on cache drive
if !dcache.diskAvailable(size * cacheSizeMultiplier) {
return putObjectFn(ctx, bucket, object, r, metadata)
if !dcache.diskAvailable(size) {
return putObjectFn(ctx, bucket, object, r, opts)
}
// fetch from backend if cache exclude pattern or cache-control
// directive set to exclude
if c.isCacheExclude(bucket, object) || filterFromCache(metadata) {
if c.isCacheExclude(bucket, object) || filterFromCache(opts.UserDefined) {
dcache.Delete(ctx, bucket, object)
return putObjectFn(ctx, bucket, object, r, metadata)
return putObjectFn(ctx, bucket, object, r, opts)
}
objInfo = ObjectInfo{}
// Initialize pipe to stream data to backend
pipeReader, pipeWriter := io.Pipe()
hashReader, err := hash.NewReader(pipeReader, size, r.MD5HexString(), r.SHA256HexString())
hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
if err != nil {
return ObjectInfo{}, err
}
// Initialize pipe to stream data to cache
rPipe, wPipe := io.Pipe()
cHashReader, err := hash.NewReader(rPipe, size, r.MD5HexString(), r.SHA256HexString())
cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
if err != nil {
return ObjectInfo{}, err
}
oinfoCh := make(chan ObjectInfo)
errCh := make(chan error)
go func() {
oinfo, perr := putObjectFn(ctx, bucket, object, hashReader, metadata)
oinfo, perr := putObjectFn(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), opts)
if perr != nil {
pipeWriter.CloseWithError(perr)
wPipe.CloseWithError(perr)
@@ -608,14 +694,14 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *h
}()

go func() {
if err = dcache.Put(ctx, bucket, object, cHashReader, metadata); err != nil {
if err = dcache.Put(ctx, bucket, object, NewPutObjReader(cHashReader, nil, nil), opts); err != nil {
wPipe.CloseWithError(err)
return
}
}()

mwriter := io.MultiWriter(pipeWriter, wPipe)
_, err = io.Copy(mwriter, r)
_, err = io.Copy(mwriter, data)
if err != nil {
err = <-errCh
return objInfo, err
@@ -627,68 +713,69 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *h
}

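PutObject above consumes the client body exactly once: a single io.Copy through io.MultiWriter feeds two pipes, one drained by the backend put and one by the cache put, each in its own goroutine. A stripped-down sketch of the fan-out (plain readers instead of hash.Reader/PutObjReader):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	client := strings.NewReader("uploaded object body")

	bpr, bpw := io.Pipe() // leg drained by the backend put
	cpr, cpw := io.Pipe() // leg drained by the cache put

	backendDone := make(chan error, 1)
	go func() { // stands in for putObjectFn
		_, err := io.Copy(io.Discard, bpr)
		backendDone <- err
	}()
	go func() { // stands in for dcache.Put: simply drain the cache leg here
		io.Copy(io.Discard, cpr)
	}()

	// One pass over the client stream, duplicated into both legs.
	_, err := io.Copy(io.MultiWriter(bpw, cpw), client)
	bpw.CloseWithError(err)
	cpw.CloseWithError(err)
	fmt.Println("backend copy finished with:", <-backendDone)
}
```

Because io.MultiWriter fails the whole copy as soon as either pipe is closed with an error, a failure on one leg aborts the combined write; that is why the hunk above closes both pipeWriter and wPipe with the first error it sees.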
// NewMultipartUpload - Starts a new multipart upload operation to backend and cache.
func (c cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
func (c cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
newMultipartUploadFn := c.NewMultipartUploadFn

if c.isCacheExclude(bucket, object) || filterFromCache(metadata) {
return newMultipartUploadFn(ctx, bucket, object, metadata)
if c.isCacheExclude(bucket, object) || filterFromCache(opts.UserDefined) {
return newMultipartUploadFn(ctx, bucket, object, opts)
}

dcache, err := c.cache.getCacheFS(ctx, bucket, object)
if err != nil {
// disk cache could not be located,execute backend call.
return newMultipartUploadFn(ctx, bucket, object, metadata)
return newMultipartUploadFn(ctx, bucket, object, opts)
}

uploadID, err = newMultipartUploadFn(ctx, bucket, object, metadata)
uploadID, err = newMultipartUploadFn(ctx, bucket, object, opts)
if err != nil {
return
}
// create new multipart upload in cache with same uploadID
dcache.NewMultipartUpload(ctx, bucket, object, metadata, uploadID)
dcache.NewMultipartUpload(ctx, bucket, object, uploadID, opts)
return uploadID, err
}

// PutObjectPart - uploads part to backend and cache simultaneously.
func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error) {
func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
data := r.Reader
putObjectPartFn := c.PutObjectPartFn
dcache, err := c.cache.getCacheFS(ctx, bucket, object)
if err != nil {
// disk cache could not be located,execute backend call.
return putObjectPartFn(ctx, bucket, object, uploadID, partID, data)
return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
}

if c.isCacheExclude(bucket, object) {
return putObjectPartFn(ctx, bucket, object, uploadID, partID, data)
return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
}

// make sure cache has at least cacheSizeMultiplier * size available
// make sure cache has at least size space available
size := data.Size()
if !dcache.diskAvailable(size * cacheSizeMultiplier) {
if !dcache.diskAvailable(size) {
select {
case dcache.purgeChan <- struct{}{}:
default:
}
return putObjectPartFn(ctx, bucket, object, uploadID, partID, data)
return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
}

info = PartInfo{}
// Initialize pipe to stream data to backend
pipeReader, pipeWriter := io.Pipe()
hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString())
hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
if err != nil {
return
}
// Initialize pipe to stream data to cache
rPipe, wPipe := io.Pipe()
cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString())
cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
if err != nil {
return
}
pinfoCh := make(chan PartInfo)
errorCh := make(chan error)
go func() {
info, err = putObjectPartFn(ctx, bucket, object, uploadID, partID, hashReader)
info, err = putObjectPartFn(ctx, bucket, object, uploadID, partID, NewPutObjReader(hashReader, nil, nil), opts)
if err != nil {
close(pinfoCh)
pipeWriter.CloseWithError(err)
@@ -700,7 +787,7 @@ func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadI
pinfoCh <- info
}()
go func() {
if _, perr := dcache.PutObjectPart(ctx, bucket, object, uploadID, partID, cHashReader); perr != nil {
if _, perr := dcache.PutObjectPart(ctx, bucket, object, uploadID, partID, NewPutObjReader(cHashReader, nil, nil), opts); perr != nil {
wPipe.CloseWithError(perr)
return
}
@@ -742,25 +829,25 @@ func (c cacheObjects) AbortMultipartUpload(ctx context.Context, bucket, object,
}

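When the cache drive is short on space, PutObjectPart nudges the purge worker with a non-blocking channel send and falls through to the backend, so cache housekeeping can never stall an upload. The idiom in isolation (a sketch with a hypothetical buffered channel, not the real purgeChan wiring):

```go
package main

import "fmt"

func main() {
	purgeChan := make(chan struct{}, 1)

	// Non-blocking send: wake an idle purge worker; if a purge signal is
	// already pending, drop this one instead of blocking the caller.
	select {
	case purgeChan <- struct{}{}:
		fmt.Println("purge requested")
	default:
		fmt.Println("purge already pending")
	}
}
```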
// CompleteMultipartUpload - completes multipart upload operation on backend and cache.
func (c cacheObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
func (c cacheObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
completeMultipartUploadFn := c.CompleteMultipartUploadFn

if c.isCacheExclude(bucket, object) {
return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
}

dcache, err := c.cache.getCacheFS(ctx, bucket, object)
if err != nil {
// disk cache could not be located,execute backend call.
return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
}
// perform backend operation
objInfo, err = completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
objInfo, err = completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
if err != nil {
return
}
// create new multipart upload in cache with same uploadID
dcache.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts)
dcache.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
return
}

@@ -876,14 +963,17 @@ func newServerCacheObjects(config CacheConfig) (CacheObjectLayer, error) {
cache: dcache,
exclude: config.Exclude,
listPool: newTreeWalkPool(globalLookupTimeout),
GetObjectFn: func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
return newObjectLayerFn().GetObject(ctx, bucket, object, startOffset, length, writer, etag)
GetObjectFn: func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
return newObjectLayerFn().GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
},
GetObjectInfoFn: func(ctx context.Context, bucket, object string) (ObjectInfo, error) {
return newObjectLayerFn().GetObjectInfo(ctx, bucket, object)
GetObjectInfoFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
return newObjectLayerFn().GetObjectInfo(ctx, bucket, object, opts)
},
PutObjectFn: func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
return newObjectLayerFn().PutObject(ctx, bucket, object, data, metadata)
GetObjectNInfoFn: func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
return newObjectLayerFn().GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
},
PutObjectFn: func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
return newObjectLayerFn().PutObject(ctx, bucket, object, data, opts)
},
DeleteObjectFn: func(ctx context.Context, bucket, object string) error {
return newObjectLayerFn().DeleteObject(ctx, bucket, object)
@@ -900,17 +990,17 @@ func newServerCacheObjects(config CacheConfig) (CacheObjectLayer, error) {
GetBucketInfoFn: func(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
return newObjectLayerFn().GetBucketInfo(ctx, bucket)
},
NewMultipartUploadFn: func(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, metadata)
NewMultipartUploadFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, opts)
},
PutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error) {
return newObjectLayerFn().PutObjectPart(ctx, bucket, object, uploadID, partID, data)
PutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
return newObjectLayerFn().PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
},
AbortMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string) error {
return newObjectLayerFn().AbortMultipartUpload(ctx, bucket, object, uploadID)
},
CompleteMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
return newObjectLayerFn().CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts)
CompleteMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
return newObjectLayerFn().CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
},
DeleteBucketFn: func(ctx context.Context, bucket string) error {
return newObjectLayerFn().DeleteBucket(ctx, bucket)

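newServerCacheObjects wires every backend call through a function field rather than calling the object layer directly, so tests can swap out individual operations without mocking the whole interface. A toy version of the pattern (hypothetical names, not MinIO's types):

```go
package main

import "fmt"

// objectStore is a hypothetical stand-in for the real ObjectLayer interface.
type objectStore interface {
	Get(bucket, object string) (string, error)
}

type realStore struct{}

func (realStore) Get(bucket, object string) (string, error) {
	return "data for " + bucket + "/" + object, nil
}

// cacheLayer delegates through function fields, mirroring GetObjectFn & co.
type cacheLayer struct {
	GetFn func(bucket, object string) (string, error)
}

func newCacheLayer(backend func() objectStore) *cacheLayer {
	return &cacheLayer{
		// Resolve the backend lazily on every call, like newObjectLayerFn().
		GetFn: func(b, o string) (string, error) { return backend().Get(b, o) },
	}
}

func main() {
	c := newCacheLayer(func() objectStore { return realStore{} })
	fmt.Println(c.GetFn("bucket", "object"))

	// A test can override just one operation:
	c.GetFn = func(b, o string) (string, error) { return "stubbed", nil }
	fmt.Println(c.GetFn("bucket", "object"))
}
```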
@@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"context"
"os"
"reflect"
"testing"
"time"
@@ -28,21 +27,15 @@ import (
)

// Initialize cache FS objects.
func initCacheFSObjects(disk string, cacheMaxUse int, t *testing.T) (*cacheFSObjects, error) {
newTestConfig(globalMinioDefaultRegion)
var err error
obj, err := newCacheFSObjects(disk, globalCacheExpiry, cacheMaxUse)
if err != nil {
t.Fatal(err)
}
return obj, nil
func initCacheFSObjects(disk string, cacheMaxUse int) (*cacheFSObjects, error) {
return newCacheFSObjects(disk, globalCacheExpiry, cacheMaxUse)
}

// inits diskCache struct for nDisks
func initDiskCaches(drives []string, cacheMaxUse int, t *testing.T) (*diskCache, error) {
var cfs []*cacheFSObjects
for _, d := range drives {
obj, err := initCacheFSObjects(d, cacheMaxUse, t)
obj, err := initCacheFSObjects(d, cacheMaxUse)
if err != nil {
return nil, err
}
@@ -131,11 +124,6 @@ func TestGetCacheFSMaxUse(t *testing.T) {

// test wildcard patterns for excluding entries from cache
func TestCacheExclusion(t *testing.T) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rootPath)
fsDirs, err := getRandomDisks(1)
if err != nil {
t.Fatal(err)
@@ -146,7 +134,7 @@ func TestCacheExclusion(t *testing.T) {
t.Fatal(err)
}
cobj := cobjects.(*cacheObjects)
globalServiceDoneCh <- struct{}{}
GlobalServiceDoneCh <- struct{}{}
testCases := []struct {
bucketName string
objectName string
@@ -204,17 +192,17 @@ func TestDiskCache(t *testing.T) {
objInfo.ContentType = contentType
objInfo.ETag = etag
objInfo.UserDefined = httpMeta

var opts ObjectOptions
byteReader := bytes.NewReader([]byte(content))
hashReader, err := hash.NewReader(byteReader, int64(size), "", "")
hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size))
if err != nil {
t.Fatal(err)
}
err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta)
err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), ObjectOptions{UserDefined: httpMeta})
if err != nil {
t.Fatal(err)
}
cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName)
cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName, opts)
if err != nil {
t.Fatal(err)
}
@@ -231,7 +219,7 @@ func TestDiskCache(t *testing.T) {
t.Fatal("Cached content-type does not match")
}
writer := bytes.NewBuffer(nil)
err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "")
err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "", opts)
if err != nil {
t.Fatal(err)
}
@@ -278,23 +266,24 @@ func TestDiskCacheMaxUse(t *testing.T) {
objInfo.ContentType = contentType
objInfo.ETag = etag
objInfo.UserDefined = httpMeta
opts := ObjectOptions{}

byteReader := bytes.NewReader([]byte(content))
hashReader, err := hash.NewReader(byteReader, int64(size), "", "")
hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size))
if err != nil {
t.Fatal(err)
}
if !cache.diskAvailable(int64(size)) {
err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta)
err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), ObjectOptions{UserDefined: httpMeta})
if err != errDiskFull {
t.Fatal("Cache max-use limit violated.")
}
} else {
err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta)
err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), ObjectOptions{UserDefined: httpMeta})
if err != nil {
t.Fatal(err)
}
cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName)
cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName, opts)
if err != nil {
t.Fatal(err)
}
@@ -311,7 +300,7 @@ func TestDiskCacheMaxUse(t *testing.T) {
t.Fatal("Cached content-type does not match")
}
writer := bytes.NewBuffer(nil)
err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "")
err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "", opts)
if err != nil {
t.Fatal(err)
}

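One change recurs throughout these hunks: hash.NewReader now takes a fifth, trailing actualSize argument in addition to the wire size and the optional MD5/SHA256 hex strings. The test updates show the call shape; for plain uncompressed data the two sizes coincide. Excerpted from the hunks above (call shape only, not runnable on its own):

```go
// Old call shape (left-hand side of the diff):
hashReader, err := hash.NewReader(byteReader, int64(size), "", "")

// New call shape -- the trailing argument reports the actual object size:
hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size))
```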
182
cmd/dummy-data-generator_test.go
Normal file
@@ -0,0 +1,182 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"testing"
)

var alphabets = []byte("abcdefghijklmnopqrstuvwxyz0123456789")

// DummyDataGen returns a reader that repeats the bytes in `alphabets`
// upto the desired length.
type DummyDataGen struct {
b []byte
idx, length int64
}

// NewDummyDataGen returns a ReadSeeker over the first `totalLength`
// bytes from the infinite stream consisting of repeated
// concatenations of `alphabets`.
//
// The skipOffset (generally = 0) can be used to skip a given number
// of bytes from the beginning of the infinite stream. This is useful
// to compare such streams of bytes that may be split up, because:
//
// Given the function:
//
// f := func(r io.Reader) string {
// b, _ := ioutil.ReadAll(r)
// return string(b)
// }
//
// for example, the following is true:
//
// f(NewDummyDataGen(100, 0)) == f(NewDummyDataGen(50, 0)) + f(NewDummyDataGen(50, 50))
func NewDummyDataGen(totalLength, skipOffset int64) io.ReadSeeker {
if totalLength < 0 {
panic("Negative length passed to DummyDataGen!")
}
if skipOffset < 0 {
panic("Negative rotations are not allowed")
}

skipOffset = skipOffset % int64(len(alphabets))
as := make([]byte, 2*len(alphabets))
copy(as, alphabets)
copy(as[len(alphabets):], alphabets)
b := as[skipOffset : skipOffset+int64(len(alphabets))]
return &DummyDataGen{
length: totalLength,
b: b,
}
}

func (d *DummyDataGen) Read(b []byte) (n int, err error) {
k := len(b)
numLetters := int64(len(d.b))
for k > 0 && d.idx < d.length {
w := copy(b[len(b)-k:], d.b[d.idx%numLetters:])
k -= w
d.idx += int64(w)
n += w
}
if d.idx >= d.length {
extraBytes := d.idx - d.length
n -= int(extraBytes)
if n < 0 {
n = 0
}
err = io.EOF
}
return
}

func (d *DummyDataGen) Seek(offset int64, whence int) (int64, error) {
switch whence {
case io.SeekStart:
if offset < 0 {
return 0, errors.New("Invalid offset")
}
d.idx = offset
case io.SeekCurrent:
if d.idx+offset < 0 {
return 0, errors.New("Invalid offset")
}
d.idx += offset
case io.SeekEnd:
if d.length+offset < 0 {
return 0, errors.New("Invalid offset")
}
d.idx = d.length + offset
}
return d.idx, nil
}

func TestDummyDataGenerator(t *testing.T) {
readAll := func(r io.Reader) string {
b, _ := ioutil.ReadAll(r)
return string(b)
}
checkEq := func(a, b string) {
if a != b {
t.Fatalf("Unexpected equality failure")
}
}

checkEq(readAll(NewDummyDataGen(0, 0)), "")

checkEq(readAll(NewDummyDataGen(10, 0)), readAll(NewDummyDataGen(10, int64(len(alphabets)))))

checkEq(readAll(NewDummyDataGen(100, 0)), readAll(NewDummyDataGen(50, 0))+readAll(NewDummyDataGen(50, 50)))

r := NewDummyDataGen(100, 0)
r.Seek(int64(len(alphabets)), 0)
checkEq(readAll(r), readAll(NewDummyDataGen(100-int64(len(alphabets)), 0)))
}

// Compares all the bytes returned by the given readers. Any Read
// errors cause a `false` result. A string describing the error is
// also returned.
func cmpReaders(r1, r2 io.Reader) (bool, string) {
bufLen := 32 * 1024
b1, b2 := make([]byte, bufLen), make([]byte, bufLen)
for i := 0; true; i++ {
n1, e1 := io.ReadFull(r1, b1)
n2, e2 := io.ReadFull(r2, b2)
if n1 != n2 {
return false, fmt.Sprintf("Read %d != %d bytes from the readers", n1, n2)
}
if !bytes.Equal(b1[:n1], b2[:n2]) {
return false, fmt.Sprintf("After reading %d equal buffers (32Kib each), we got the following two strings:\n%v\n%v\n",
i, b1, b2)
}
// Check if stream has ended
if (e1 == io.ErrUnexpectedEOF && e2 == io.ErrUnexpectedEOF) || (e1 == io.EOF && e2 == io.EOF) {
break
}
if e1 != nil || e2 != nil {
return false, fmt.Sprintf("Got unexpected error values: %v == %v", e1, e2)
}
}
return true, ""
}

func TestCmpReaders(t *testing.T) {
{
r1 := bytes.NewBuffer([]byte("abc"))
r2 := bytes.NewBuffer([]byte("abc"))
ok, msg := cmpReaders(r1, r2)
if !(ok && msg == "") {
t.Fatalf("unexpected")
}
}

{
r1 := bytes.NewBuffer([]byte("abc"))
r2 := bytes.NewBuffer([]byte("abcd"))
ok, _ := cmpReaders(r1, r2)
if ok {
t.Fatalf("unexpected")
}
}
}
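The two helpers above are designed to compose: DummyDataGen produces deterministic, seekable streams and cmpReaders verifies byte equality, so ranged reads can be checked without storing fixtures. A sketch of such a test (hypothetical, not part of the file; assumes the file's existing imports):

```go
func TestRangedReadAgainstGenerator(t *testing.T) {
	// Bytes 50..99 of the canonical stream, obtained two different ways,
	// must agree: by seeking a full generator, and by skipping at creation.
	full := NewDummyDataGen(100, 0)
	if _, err := full.Seek(50, io.SeekStart); err != nil {
		t.Fatal(err)
	}
	if ok, msg := cmpReaders(full, NewDummyDataGen(50, 50)); !ok {
		t.Fatalf("streams differ: %s", msg)
	}
}
```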
234
cmd/dummy-handlers.go
Normal file
@@ -0,0 +1,234 @@
/*
* Minio Cloud Storage, (C) 2018, 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
"encoding/xml"
"net/http"

"github.com/gorilla/mux"
"github.com/minio/minio/pkg/policy"
)

// Data types used for returning dummy tagging XML.
// These variables shouldn't be used elsewhere.
// They are only defined to be used in this file alone.

type tagging struct {
XMLName xml.Name `xml:"Tagging"`
TagSet tagSet `xml:"TagSet"`
}

type tagSet struct {
Tag []tagElem `xml:"Tag"`
}

type tagElem struct {
Key string `xml:"Key"`
Value string `xml:"Value"`
}

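For reference, encoding the one-element tagging value that the handlers below construct yields the empty-tag document S3 clients expect. A small standalone sketch using the same struct shapes (duplicated here only for illustration):

```go
package main

import (
	"encoding/xml"
	"os"
)

type tagging struct {
	XMLName xml.Name `xml:"Tagging"`
	TagSet  tagSet   `xml:"TagSet"`
}
type tagSet struct {
	Tag []tagElem `xml:"Tag"`
}
type tagElem struct {
	Key   string `xml:"Key"`
	Value string `xml:"Value"`
}

func main() {
	t := &tagging{}
	t.TagSet.Tag = append(t.TagSet.Tag, tagElem{})
	// Emits: <Tagging><TagSet><Tag><Key></Key><Value></Value></Tag></TagSet></Tagging>
	xml.NewEncoder(os.Stdout).Encode(t)
}
```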
// GetBucketWebsite - GET bucket website, a dummy api
func (api objectAPIHandlers) GetBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}

// GetBucketVersioning - GET bucket versioning, a dummy api
func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}

// GetBucketAccelerate - GET bucket accelerate, a dummy api
func (api objectAPIHandlers) GetBucketAccelerateHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}

// GetBucketRequestPaymentHandler - GET bucket requestPayment, a dummy api
func (api objectAPIHandlers) GetBucketRequestPaymentHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}

// GetBucketLoggingHandler - GET bucket logging, a dummy api
func (api objectAPIHandlers) GetBucketLoggingHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}

// GetBucketLifecycleHandler - GET bucket lifecycle, a dummy api
func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}

// GetBucketReplicationHandler - GET bucket replication, a dummy api
func (api objectAPIHandlers) GetBucketReplicationHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}

// DeleteBucketTaggingHandler - DELETE bucket tagging, a dummy api
func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}

// DeleteBucketWebsiteHandler - DELETE bucket website, a dummy api
func (api objectAPIHandlers) DeleteBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
w.(http.Flusher).Flush()
}

type allowedMethod string

// Define strings
const (
GET allowedMethod = http.MethodGet
PUT allowedMethod = http.MethodPut
HEAD allowedMethod = http.MethodHead
POST allowedMethod = http.MethodPost
DELETE allowedMethod = http.MethodDelete
)

// GetBucketCorsHandler - GET bucket cors, a dummy api
func (api objectAPIHandlers) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketCorsHandler")

type corsRule struct {
AllowedHeaders []string `xml:"AllowedHeaders"`
AllowedMethods []allowedMethod `xml:"AllowedMethod"`
AllowedOrigins []string `xml:"AllowedOrigin"`
ExposeHeaders []string `xml:"ExposeHeader"`
MaxAgeSeconds int64 `xml:"MaxAgeSeconds"`
}

type corsConfiguration struct {
XMLName xml.Name `xml:"CORSConfiguration"`
CorsRule []corsRule `xml:"CORSRule"`
}

vars := mux.Vars(r)
bucket := vars["bucket"]

objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

// Allow getBucketCors if policy action is set, since this is a dummy call
// we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

// Validate if bucket exists, before proceeding further...
_, err := objAPI.GetBucketInfo(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

cors := &corsConfiguration{}
if err := xml.NewEncoder(w).Encode(cors); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

w.(http.Flusher).Flush()
}

// GetBucketTaggingHandler - GET bucket tagging, a dummy api
func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketTagging")

vars := mux.Vars(r)
bucket := vars["bucket"]

objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

// Allow getBucketTagging if policy action is set, since this is a dummy call
// we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

// Validate if bucket exists, before proceeding further...
_, err := objAPI.GetBucketInfo(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

tags := &tagging{}
tags.TagSet.Tag = append(tags.TagSet.Tag, tagElem{})

if err := xml.NewEncoder(w).Encode(tags); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

w.(http.Flusher).Flush()
}

// GetObjectTaggingHandler - GET object tagging, a dummy api
func (api objectAPIHandlers) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetObjectTagging")

vars := mux.Vars(r)
bucket := vars["bucket"]
object := vars["object"]

objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}

// Allow getObjectTagging if policy action is set, since this is a dummy call
// we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}

// Validate if object exists, before proceeding further...
_, err := objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

tags := &tagging{}
tags.TagSet.Tag = append(tags.TagSet.Tag, tagElem{})

if err := xml.NewEncoder(w).Encode(tags); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

w.(http.Flusher).Flush()
}
@@ -1,165 +0,0 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
"context"
"io"
"time"

"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/policy"
)

type DummyObjectLayer struct{}

func (api *DummyObjectLayer) Shutdown(context.Context) (err error) {
return
}

func (api *DummyObjectLayer) StorageInfo(context.Context) (si StorageInfo) {
return
}

func (api *DummyObjectLayer) MakeBucketWithLocation(ctx context.Context, bucket string, location string) (err error) {
return
}

func (api *DummyObjectLayer) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
return
}

func (api *DummyObjectLayer) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
return
}

func (api *DummyObjectLayer) DeleteBucket(ctx context.Context, bucket string) (err error) {
return
}

func (api *DummyObjectLayer) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
return
}

func (api *DummyObjectLayer) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
return
}

func (api *DummyObjectLayer) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error) {
return
}

func (api *DummyObjectLayer) GetObjectInfo(ctx context.Context, bucket, object string) (objInfo ObjectInfo, err error) {
return
}

func (api *DummyObjectLayer) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
return
}

func (api *DummyObjectLayer) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo) (objInfo ObjectInfo, err error) {
return
}

func (api *DummyObjectLayer) DeleteObject(ctx context.Context, bucket, object string) (err error) {
return
}

func (api *DummyObjectLayer) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
return
}

func (api *DummyObjectLayer) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
return
}

func (api *DummyObjectLayer) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo) (info PartInfo, err error) {
return
}

func (api *DummyObjectLayer) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error) {
return
}

func (api *DummyObjectLayer) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) {
return
}

func (api *DummyObjectLayer) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) (err error) {
return
}

func (api *DummyObjectLayer) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
return
}

func (api *DummyObjectLayer) ReloadFormat(ctx context.Context, dryRun bool) (err error) {
return
}

func (api *DummyObjectLayer) HealFormat(ctx context.Context, dryRun bool) (item madmin.HealResultItem, err error) {
return
}

func (api *DummyObjectLayer) HealBucket(ctx context.Context, bucket string, dryRun bool) (items []madmin.HealResultItem, err error) {
return
}

func (api *DummyObjectLayer) HealObject(ctx context.Context, bucket, object string, dryRun bool) (item madmin.HealResultItem, err error) {
return
}

func (api *DummyObjectLayer) ListBucketsHeal(ctx context.Context) (buckets []BucketInfo, err error) {
return
}

func (api *DummyObjectLayer) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (info ListObjectsInfo, err error) {
return
}

func (api *DummyObjectLayer) ListLocks(ctx context.Context, bucket, prefix string, duration time.Duration) (info []VolumeLockInfo, err error) {
return
}

func (api *DummyObjectLayer) ClearLocks(context.Context, []VolumeLockInfo) (err error) {
return
}

func (api *DummyObjectLayer) SetBucketPolicy(context.Context, string, *policy.Policy) (err error) {
return
}

func (api *DummyObjectLayer) GetBucketPolicy(context.Context, string) (bucketPolicy *policy.Policy, err error) {
return
}

func (api *DummyObjectLayer) RefreshBucketPolicy(context.Context, string) (err error) {
return
}

func (api *DummyObjectLayer) DeleteBucketPolicy(context.Context, string) (err error) {
return
}

func (api *DummyObjectLayer) IsNotificationSupported() (b bool) {
return
}

func (api *DummyObjectLayer) IsEncryptionSupported() (b bool) {
return
}
1177
cmd/encryption-v1.go
File diff suppressed because it is too large
@@ -18,21 +18,55 @@ package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
"github.com/minio/minio-go/pkg/encrypt"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
"github.com/minio/sio"
|
||||
)
|
||||
|
||||
var hasServerSideEncryptionHeaderTests = []struct {
|
||||
headers map[string]string
|
||||
sseRequest bool
|
||||
}{
|
||||
{headers: map[string]string{crypto.SSECAlgorithm: "AES256", crypto.SSECKey: "key", crypto.SSECKeyMD5: "md5"}, sseRequest: true}, // 0
|
||||
{headers: map[string]string{crypto.SSECAlgorithm: "AES256"}, sseRequest: true}, // 1
|
||||
{headers: map[string]string{crypto.SSECKey: "key"}, sseRequest: true}, // 2
|
||||
{headers: map[string]string{crypto.SSECKeyMD5: "md5"}, sseRequest: true}, // 3
|
||||
{headers: map[string]string{}, sseRequest: false}, // 4
|
||||
{headers: map[string]string{crypto.SSECopyAlgorithm + " ": "AES256", " " + crypto.SSECopyKey: "key", crypto.SSECopyKeyMD5 + " ": "md5"}, sseRequest: false}, // 5
|
||||
{headers: map[string]string{crypto.SSECopyAlgorithm: "", crypto.SSECopyKey: "", crypto.SSECopyKeyMD5: ""}, sseRequest: false}, // 6
|
||||
{headers: map[string]string{crypto.SSEHeader: ""}, sseRequest: true}, // 7
|
||||
}
|
||||
|
||||
func TestHasServerSideEncryptionHeader(t *testing.T) {
|
||||
for i, test := range hasServerSideEncryptionHeaderTests {
|
||||
headers := http.Header{}
|
||||
for k, v := range test.headers {
|
||||
headers.Set(k, v)
|
||||
}
|
||||
if hasServerSideEncryptionHeader(headers) != test.sseRequest {
|
||||
t.Errorf("Test %d: Expected hasServerSideEncryptionHeader to return %v", i, test.sseRequest)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var hasSSECopyCustomerHeaderTests = []struct {
|
||||
headers map[string]string
|
||||
sseRequest bool
|
||||
}{
|
||||
{headers: map[string]string{SSECopyCustomerAlgorithm: "AES256", SSECopyCustomerKey: "key", SSECopyCustomerKeyMD5: "md5"}, sseRequest: true}, // 0
|
||||
{headers: map[string]string{SSECopyCustomerAlgorithm: "AES256"}, sseRequest: true}, // 1
|
||||
{headers: map[string]string{SSECopyCustomerKey: "key"}, sseRequest: true}, // 2
|
||||
{headers: map[string]string{SSECopyCustomerKeyMD5: "md5"}, sseRequest: true}, // 3
|
||||
{headers: map[string]string{}, sseRequest: false}, // 4
|
||||
{headers: map[string]string{SSECopyCustomerAlgorithm + " ": "AES256", " " + SSECopyCustomerKey: "key", SSECopyCustomerKeyMD5 + " ": "md5"}, sseRequest: false}, // 5
|
||||
{headers: map[string]string{SSECopyCustomerAlgorithm: "", SSECopyCustomerKey: "", SSECopyCustomerKeyMD5: ""}, sseRequest: false}, // 6
|
||||
{headers: map[string]string{crypto.SSECopyAlgorithm: "AES256", crypto.SSECopyKey: "key", crypto.SSECopyKeyMD5: "md5"}, sseRequest: true}, // 0
|
||||
{headers: map[string]string{crypto.SSECopyAlgorithm: "AES256"}, sseRequest: true}, // 1
|
||||
{headers: map[string]string{crypto.SSECopyKey: "key"}, sseRequest: true}, // 2
|
||||
{headers: map[string]string{crypto.SSECopyKeyMD5: "md5"}, sseRequest: true}, // 3
|
||||
{headers: map[string]string{}, sseRequest: false}, // 4
|
||||
{headers: map[string]string{crypto.SSECopyAlgorithm + " ": "AES256", " " + crypto.SSECopyKey: "key", crypto.SSECopyKeyMD5 + " ": "md5"}, sseRequest: false}, // 5
|
||||
{headers: map[string]string{crypto.SSECopyAlgorithm: "", crypto.SSECopyKey: "", crypto.SSECopyKeyMD5: ""}, sseRequest: true}, // 6
|
||||
{headers: map[string]string{crypto.SSEHeader: ""}, sseRequest: false}, // 7
|
||||
|
||||
}
|
||||
|
||||
func TestIsSSECopyCustomerRequest(t *testing.T) {
|
||||
@@ -41,8 +75,8 @@ func TestIsSSECopyCustomerRequest(t *testing.T) {
|
||||
for k, v := range test.headers {
|
||||
headers.Set(k, v)
|
||||
}
|
||||
if hasSSECopyCustomerHeader(headers) != test.sseRequest {
|
||||
t.Errorf("Test %d: Expected hasSSECopyCustomerHeader to return %v", i, test.sseRequest)
|
||||
if crypto.SSECopy.IsRequested(headers) != test.sseRequest {
|
||||
t.Errorf("Test %d: Expected crypto.SSECopy.IsRequested to return %v", i, test.sseRequest)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -51,250 +85,49 @@ var hasSSECustomerHeaderTests = []struct {
|
||||
headers map[string]string
|
||||
sseRequest bool
|
||||
}{
|
||||
{headers: map[string]string{SSECustomerAlgorithm: "AES256", SSECustomerKey: "key", SSECustomerKeyMD5: "md5"}, sseRequest: true}, // 0
|
||||
{headers: map[string]string{SSECustomerAlgorithm: "AES256"}, sseRequest: true}, // 1
|
||||
{headers: map[string]string{SSECustomerKey: "key"}, sseRequest: true}, // 2
|
||||
{headers: map[string]string{SSECustomerKeyMD5: "md5"}, sseRequest: true}, // 3
|
||||
{headers: map[string]string{crypto.SSECAlgorithm: "AES256", crypto.SSECKey: "key", crypto.SSECKeyMD5: "md5"}, sseRequest: true}, // 0
|
||||
{headers: map[string]string{crypto.SSECAlgorithm: "AES256"}, sseRequest: true}, // 1
|
||||
{headers: map[string]string{crypto.SSECKey: "key"}, sseRequest: true}, // 2
|
||||
{headers: map[string]string{crypto.SSECKeyMD5: "md5"}, sseRequest: true}, // 3
|
||||
{headers: map[string]string{}, sseRequest: false}, // 4
|
||||
{headers: map[string]string{SSECustomerAlgorithm + " ": "AES256", " " + SSECustomerKey: "key", SSECustomerKeyMD5 + " ": "md5"}, sseRequest: false}, // 5
|
||||
{headers: map[string]string{SSECustomerAlgorithm: "", SSECustomerKey: "", SSECustomerKeyMD5: ""}, sseRequest: false}, // 6
|
||||
{headers: map[string]string{crypto.SSECAlgorithm + " ": "AES256", " " + crypto.SSECKey: "key", crypto.SSECKeyMD5 + " ": "md5"}, sseRequest: false}, // 5
|
||||
{headers: map[string]string{crypto.SSECAlgorithm: "", crypto.SSECKey: "", crypto.SSECKeyMD5: ""}, sseRequest: true}, // 6
|
||||
{headers: map[string]string{crypto.SSEHeader: ""}, sseRequest: false}, // 7
|
||||
|
||||
}
|
||||
|
||||
func TesthasSSECustomerHeader(t *testing.T) {
|
||||
func TestHasSSECustomerHeader(t *testing.T) {
|
||||
for i, test := range hasSSECustomerHeaderTests {
|
||||
headers := http.Header{}
|
||||
for k, v := range test.headers {
|
||||
headers.Set(k, v)
|
||||
}
|
||||
if hasSSECustomerHeader(headers) != test.sseRequest {
|
||||
if crypto.SSEC.IsRequested(headers) != test.sseRequest {
|
||||
t.Errorf("Test %d: Expected hasSSECustomerHeader to return %v", i, test.sseRequest)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var parseSSECustomerRequestTests = []struct {
|
||||
headers map[string]string
|
||||
useTLS bool
|
||||
err error
|
||||
}{
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECustomerAlgorithm: "AES256",
|
||||
SSECustomerKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 0
|
||||
SSECustomerKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
|
||||
},
|
||||
useTLS: true, err: nil,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECustomerAlgorithm: "AES256",
|
||||
SSECustomerKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 1
|
||||
SSECustomerKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
|
||||
},
|
||||
useTLS: false, err: errInsecureSSERequest,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECustomerAlgorithm: "AES 256",
|
||||
SSECustomerKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 2
|
||||
SSECustomerKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
|
||||
},
|
||||
useTLS: true, err: errInvalidSSEAlgorithm,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECustomerAlgorithm: "AES256",
|
||||
SSECustomerKey: "NjE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 3
|
||||
SSECustomerKeyMD5: "H+jq/LwEOEO90YtiTuNFVw==",
|
||||
},
|
||||
useTLS: true, err: errSSEKeyMD5Mismatch,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECustomerAlgorithm: "AES256",
|
||||
SSECustomerKey: " jE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 4
|
||||
SSECustomerKeyMD5: "H+jq/LwEOEO90YtiTuNFVw==",
|
||||
},
|
||||
useTLS: true, err: errInvalidSSEKey,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECustomerAlgorithm: "AES256",
|
||||
SSECustomerKey: "NjE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 5
|
||||
SSECustomerKeyMD5: " +jq/LwEOEO90YtiTuNFVw==",
|
||||
},
|
||||
useTLS: true, err: errSSEKeyMD5Mismatch,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECustomerAlgorithm: "AES256",
|
||||
SSECustomerKey: "vFQ9ScFOF6Tu/BfzMS+rVMvlZGJHi5HmGJenJfrfKI45", // 6
|
||||
SSECustomerKeyMD5: "9KPgDdZNTHimuYCwnJTp5g==",
|
||||
},
|
||||
useTLS: true, err: errInvalidSSEKey,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECustomerAlgorithm: "AES256",
|
||||
SSECustomerKey: "", // 7
|
||||
SSECustomerKeyMD5: "9KPgDdZNTHimuYCwnJTp5g==",
|
||||
},
|
||||
useTLS: true, err: errMissingSSEKey,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECustomerAlgorithm: "AES256",
|
||||
SSECustomerKey: "vFQ9ScFOF6Tu/BfzMS+rVMvlZGJHi5HmGJenJfrfKI45", // 8
|
||||
SSECustomerKeyMD5: "",
|
||||
},
|
||||
useTLS: true, err: errMissingSSEKeyMD5,
|
||||
},
|
||||
}
|
||||
|
||||
func TestParseSSECustomerRequest(t *testing.T) {
|
||||
defer func(flag bool) { globalIsSSL = flag }(globalIsSSL)
|
||||
for i, test := range parseSSECustomerRequestTests {
|
||||
headers := http.Header{}
|
||||
for k, v := range test.headers {
|
||||
headers.Set(k, v)
|
||||
}
|
||||
request := &http.Request{}
|
||||
request.Header = headers
|
||||
globalIsSSL = test.useTLS
|
||||
|
||||
_, err := ParseSSECustomerRequest(request)
|
||||
if err != test.err {
|
||||
t.Errorf("Test %d: Parse returned: %v want: %v", i, err, test.err)
|
||||
}
|
||||
key := request.Header.Get(SSECustomerKey)
|
||||
if (err == nil || err == errSSEKeyMD5Mismatch) && key != "" {
|
||||
t.Errorf("Test %d: Client key survived parsing - found key: %v", i, key)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
var parseSSECopyCustomerRequestTests = []struct {
|
||||
headers map[string]string
|
||||
useTLS bool
|
||||
err error
|
||||
}{
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECopyCustomerAlgorithm: "AES256",
|
||||
SSECopyCustomerKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 0
|
||||
SSECopyCustomerKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
|
||||
},
|
||||
useTLS: true, err: nil,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECopyCustomerAlgorithm: "AES256",
|
||||
SSECopyCustomerKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 1
|
||||
SSECopyCustomerKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
|
||||
},
|
||||
useTLS: false, err: errInsecureSSERequest,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECopyCustomerAlgorithm: "AES 256",
|
||||
SSECopyCustomerKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=", // 2
|
||||
SSECopyCustomerKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
|
||||
},
|
||||
useTLS: true, err: errInvalidSSEAlgorithm,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECopyCustomerAlgorithm: "AES256",
|
||||
SSECopyCustomerKey: "NjE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 3
|
||||
SSECopyCustomerKeyMD5: "H+jq/LwEOEO90YtiTuNFVw==",
|
||||
},
|
||||
useTLS: true, err: errSSEKeyMD5Mismatch,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECopyCustomerAlgorithm: "AES256",
|
||||
SSECopyCustomerKey: " jE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 4
|
||||
SSECopyCustomerKeyMD5: "H+jq/LwEOEO90YtiTuNFVw==",
|
||||
},
|
||||
useTLS: true, err: errInvalidSSEKey,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECopyCustomerAlgorithm: "AES256",
|
||||
SSECopyCustomerKey: "NjE0SL87s+ZhYtaTrg5eI5cjhCQLGPVMKenPG2bCJFw=", // 5
|
||||
SSECopyCustomerKeyMD5: " +jq/LwEOEO90YtiTuNFVw==",
|
||||
},
|
||||
useTLS: true, err: errSSEKeyMD5Mismatch,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECopyCustomerAlgorithm: "AES256",
|
||||
SSECopyCustomerKey: "vFQ9ScFOF6Tu/BfzMS+rVMvlZGJHi5HmGJenJfrfKI45", // 6
|
||||
SSECopyCustomerKeyMD5: "9KPgDdZNTHimuYCwnJTp5g==",
|
||||
},
|
||||
useTLS: true, err: errInvalidSSEKey,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECopyCustomerAlgorithm: "AES256",
|
||||
SSECopyCustomerKey: "", // 7
|
||||
SSECopyCustomerKeyMD5: "9KPgDdZNTHimuYCwnJTp5g==",
|
||||
},
|
||||
useTLS: true, err: errMissingSSEKey,
|
||||
},
|
||||
{
|
||||
headers: map[string]string{
|
||||
SSECopyCustomerAlgorithm: "AES256",
|
||||
SSECopyCustomerKey: "vFQ9ScFOF6Tu/BfzMS+rVMvlZGJHi5HmGJenJfrfKI45", // 8
|
||||
SSECopyCustomerKeyMD5: "",
|
||||
},
|
||||
useTLS: true, err: errMissingSSEKeyMD5,
|
||||
},
|
||||
}
|
||||
|
||||
func TestParseSSECopyCustomerRequest(t *testing.T) {
|
||||
defer func(flag bool) { globalIsSSL = flag }(globalIsSSL)
|
||||
for i, test := range parseSSECopyCustomerRequestTests {
|
||||
headers := http.Header{}
|
||||
for k, v := range test.headers {
|
||||
headers.Set(k, v)
|
||||
}
|
||||
request := &http.Request{}
|
||||
request.Header = headers
|
||||
globalIsSSL = test.useTLS
|
||||
|
||||
_, err := ParseSSECopyCustomerRequest(request)
|
||||
if err != test.err {
|
||||
t.Errorf("Test %d: Parse returned: %v want: %v", i, err, test.err)
|
||||
}
|
||||
key := request.Header.Get(SSECopyCustomerKey)
|
||||
if (err == nil || err == errSSEKeyMD5Mismatch) && key != "" {
|
||||
t.Errorf("Test %d: Client key survived parsing - found key: %v", i, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var encryptRequestTests = []struct {
|
||||
header map[string]string
|
||||
metadata map[string]string
|
||||
}{
|
||||
{
|
||||
header: map[string]string{
|
||||
SSECustomerAlgorithm: "AES256",
|
||||
SSECustomerKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
|
||||
SSECustomerKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
|
||||
crypto.SSECAlgorithm: "AES256",
|
||||
crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
|
||||
crypto.SSECKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
|
||||
},
|
||||
metadata: map[string]string{},
|
||||
},
|
||||
{
|
||||
header: map[string]string{
|
||||
SSECustomerAlgorithm: "AES256",
|
||||
SSECustomerKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
|
||||
SSECustomerKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
|
||||
crypto.SSECAlgorithm: "AES256",
|
||||
crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
|
||||
crypto.SSECKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
|
||||
},
|
||||
metadata: map[string]string{
|
||||
SSECustomerKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
|
||||
crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -308,20 +141,18 @@ func TestEncryptRequest(t *testing.T) {
|
||||
for k, v := range test.header {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
_, err := EncryptRequest(content, req, "bucket", "object", test.metadata)
|
||||
_, _, err := EncryptRequest(content, req, "bucket", "object", test.metadata)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: Failed to encrypt request: %v", i, err)
|
||||
}
|
||||
if key, ok := test.metadata[SSECustomerKey]; ok {
|
||||
t.Errorf("Test %d: Client provided key survived in metadata - key: %s", i, key)
|
||||
}
|
||||
if kdf, ok := test.metadata[ServerSideEncryptionSealAlgorithm]; !ok {
|
||||
if kdf, ok := test.metadata[crypto.SSESealAlgorithm]; !ok {
|
||||
t.Errorf("Test %d: ServerSideEncryptionKDF must be part of metadata: %v", i, kdf)
|
||||
}
|
||||
if iv, ok := test.metadata[ServerSideEncryptionIV]; !ok {
|
||||
t.Errorf("Test %d: ServerSideEncryptionIV must be part of metadata: %v", i, iv)
|
||||
if iv, ok := test.metadata[crypto.SSEIV]; !ok {
|
||||
t.Errorf("Test %d: crypto.SSEIV must be part of metadata: %v", i, iv)
|
||||
}
|
||||
if mac, ok := test.metadata[ServerSideEncryptionSealedKey]; !ok {
|
||||
if mac, ok := test.metadata[crypto.SSECSealedKey]; !ok {
|
||||
t.Errorf("Test %d: ServerSideEncryptionKeyMAC must be part of metadata: %v", i, mac)
|
||||
}
|
||||
}
@@ -337,14 +168,14 @@ var decryptRequestTests = []struct {
bucket: "bucket",
object: "object",
header: map[string]string{
- SSECustomerAlgorithm: "AES256",
- SSECustomerKey: "MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ=",
- SSECustomerKeyMD5: "7PpPLAK26ONlVUGOWlusfg==",
+ crypto.SSECAlgorithm: "AES256",
+ crypto.SSECKey: "MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ=",
+ crypto.SSECKeyMD5: "7PpPLAK26ONlVUGOWlusfg==",
},
metadata: map[string]string{
- ServerSideEncryptionSealAlgorithm: SSESealAlgorithmDareSha256,
- ServerSideEncryptionIV: "7nQqotA8xgrPx6QK7Ap3GCfjKitqJSrGP7xzgErSJlw=",
- ServerSideEncryptionSealedKey: "EAAfAAAAAAD7v1hQq3PFRUHsItalxmrJqrOq6FwnbXNarxOOpb8jTWONPPKyM3Gfjkjyj6NCf+aB/VpHCLCTBA==",
+ crypto.SSESealAlgorithm: SSESealAlgorithmDareSha256,
+ crypto.SSEIV: "7nQqotA8xgrPx6QK7Ap3GCfjKitqJSrGP7xzgErSJlw=",
+ crypto.SSECSealedKey: "EAAfAAAAAAD7v1hQq3PFRUHsItalxmrJqrOq6FwnbXNarxOOpb8jTWONPPKyM3Gfjkjyj6NCf+aB/VpHCLCTBA==",
},
shouldFail: false,
},
@@ -352,14 +183,14 @@ var decryptRequestTests = []struct {
bucket: "bucket",
object: "object",
header: map[string]string{
- SSECustomerAlgorithm: "AES256",
- SSECustomerKey: "MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ=",
- SSECustomerKeyMD5: "7PpPLAK26ONlVUGOWlusfg==",
+ crypto.SSECAlgorithm: "AES256",
+ crypto.SSECKey: "MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ=",
+ crypto.SSECKeyMD5: "7PpPLAK26ONlVUGOWlusfg==",
},
metadata: map[string]string{
- ServerSideEncryptionSealAlgorithm: SSESealAlgorithmDareV2HmacSha256,
- ServerSideEncryptionIV: "qEqmsONcorqlcZXJxaw32H04eyXyXwUgjHzlhkaIYrU=",
- ServerSideEncryptionSealedKey: "IAAfAIM14ugTGcM/dIrn4iQMrkl1sjKyeBQ8FBEvRebYj8vWvxG+0cJRpC6NXRU1wJN50JaUOATjO7kz0wZ2mA==",
+ crypto.SSESealAlgorithm: SSESealAlgorithmDareV2HmacSha256,
+ crypto.SSEIV: "qEqmsONcorqlcZXJxaw32H04eyXyXwUgjHzlhkaIYrU=",
+ crypto.SSECSealedKey: "IAAfAIM14ugTGcM/dIrn4iQMrkl1sjKyeBQ8FBEvRebYj8vWvxG+0cJRpC6NXRU1wJN50JaUOATjO7kz0wZ2mA==",
},
shouldFail: false,
},
@@ -367,14 +198,14 @@ var decryptRequestTests = []struct {
bucket: "bucket",
object: "object",
header: map[string]string{
- SSECustomerAlgorithm: "AES256",
- SSECustomerKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
- SSECustomerKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
+ crypto.SSECAlgorithm: "AES256",
+ crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
+ crypto.SSECKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
},
metadata: map[string]string{
- ServerSideEncryptionSealAlgorithm: "HMAC-SHA3",
- ServerSideEncryptionIV: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
- ServerSideEncryptionSealedKey: "SY5E9AvI2tI7/nUrUAssIGE32Hcs4rR9z/CUuPqu5N4=",
+ crypto.SSESealAlgorithm: "HMAC-SHA3",
+ crypto.SSEIV: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
+ crypto.SSECSealedKey: "SY5E9AvI2tI7/nUrUAssIGE32Hcs4rR9z/CUuPqu5N4=",
},
shouldFail: true,
},
@@ -382,14 +213,14 @@ var decryptRequestTests = []struct {
bucket: "bucket",
object: "object",
header: map[string]string{
- SSECustomerAlgorithm: "AES256",
- SSECustomerKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
- SSECustomerKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
+ crypto.SSECAlgorithm: "AES256",
+ crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
+ crypto.SSECKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
},
metadata: map[string]string{
- ServerSideEncryptionSealAlgorithm: SSESealAlgorithmDareSha256,
- ServerSideEncryptionIV: "RrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
- ServerSideEncryptionSealedKey: "SY5E9AvI2tI7/nUrUAssIGE32Hcs4rR9z/CUuPqu5N4=",
+ crypto.SSESealAlgorithm: SSESealAlgorithmDareSha256,
+ crypto.SSEIV: "RrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
+ crypto.SSECSealedKey: "SY5E9AvI2tI7/nUrUAssIGE32Hcs4rR9z/CUuPqu5N4=",
},
shouldFail: true,
},
@@ -397,14 +228,14 @@ var decryptRequestTests = []struct {
bucket: "bucket",
object: "object",
header: map[string]string{
- SSECustomerAlgorithm: "AES256",
- SSECustomerKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
- SSECustomerKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
+ crypto.SSECAlgorithm: "AES256",
+ crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
+ crypto.SSECKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
},
metadata: map[string]string{
- ServerSideEncryptionSealAlgorithm: SSESealAlgorithmDareSha256,
- ServerSideEncryptionIV: "XAm0dRrJsEsyPb1UuFNezv1bl9ehxuYsgUVC/MUctE2k=",
- ServerSideEncryptionSealedKey: "SY5E9AvI2tI7/nUrUAssIGE32Hds4rR9z/CUuPqu5N4=",
+ crypto.SSESealAlgorithm: SSESealAlgorithmDareSha256,
+ crypto.SSEIV: "XAm0dRrJsEsyPb1UuFNezv1bl9ehxuYsgUVC/MUctE2k=",
+ crypto.SSECSealedKey: "SY5E9AvI2tI7/nUrUAssIGE32Hds4rR9z/CUuPqu5N4=",
},
shouldFail: true,
},
@@ -412,14 +243,14 @@ var decryptRequestTests = []struct {
bucket: "bucket",
object: "object-2",
header: map[string]string{
- SSECustomerAlgorithm: "AES256",
- SSECustomerKey: "MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ=",
- SSECustomerKeyMD5: "7PpPLAK26ONlVUGOWlusfg==",
+ crypto.SSECAlgorithm: "AES256",
+ crypto.SSECKey: "MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ=",
+ crypto.SSECKeyMD5: "7PpPLAK26ONlVUGOWlusfg==",
},
metadata: map[string]string{
- ServerSideEncryptionSealAlgorithm: SSESealAlgorithmDareV2HmacSha256,
- ServerSideEncryptionIV: "qEqmsONcorqlcZXJxaw32H04eyXyXwUgjHzlhkaIYrU=",
- ServerSideEncryptionSealedKey: "IAAfAIM14ugTGcM/dIrn4iQMrkl1sjKyeBQ8FBEvRebYj8vWvxG+0cJRpC6NXRU1wJN50JaUOATjO7kz0wZ2mA==",
+ crypto.SSESealAlgorithm: SSESealAlgorithmDareV2HmacSha256,
+ crypto.SSEIV: "qEqmsONcorqlcZXJxaw32H04eyXyXwUgjHzlhkaIYrU=",
+ crypto.SSECSealedKey: "IAAfAIM14ugTGcM/dIrn4iQMrkl1sjKyeBQ8FBEvRebYj8vWvxG+0cJRpC6NXRU1wJN50JaUOATjO7kz0wZ2mA==",
},
shouldFail: true,
},
@@ -441,16 +272,16 @@ func TestDecryptRequest(t *testing.T) {
if err == nil && test.shouldFail {
t.Fatalf("Test %d: should fail but passed", i)
}
- if key, ok := test.metadata[SSECustomerKey]; ok {
+ if key, ok := test.metadata[crypto.SSECKey]; ok {
t.Errorf("Test %d: Client provided key survived in metadata - key: %s", i, key)
}
- if kdf, ok := test.metadata[ServerSideEncryptionSealAlgorithm]; ok && !test.shouldFail {
+ if kdf, ok := test.metadata[crypto.SSESealAlgorithm]; ok && !test.shouldFail {
t.Errorf("Test %d: ServerSideEncryptionKDF should not be part of metadata: %v", i, kdf)
}
- if iv, ok := test.metadata[ServerSideEncryptionIV]; ok && !test.shouldFail {
- t.Errorf("Test %d: ServerSideEncryptionIV should not be part of metadata: %v", i, iv)
+ if iv, ok := test.metadata[crypto.SSEIV]; ok && !test.shouldFail {
+ t.Errorf("Test %d: crypto.SSEIV should not be part of metadata: %v", i, iv)
}
- if mac, ok := test.metadata[ServerSideEncryptionSealedKey]; ok && !test.shouldFail {
+ if mac, ok := test.metadata[crypto.SSECSealedKey]; ok && !test.shouldFail {
t.Errorf("Test %d: ServerSideEncryptionKeyMAC should not be part of metadata: %v", i, mac)
}
}
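Reviewer note: the shouldFail fixtures above flip single characters in the IV and sealed key (compare "...bl9hx..." with "...bl9ehx...") and also change the object name while keeping the same sealed metadata. A minimal sketch of why any such change must break unsealing, assuming an HMAC-based seal along the lines of DAREv2-HMAC-SHA256 (the key and context strings below are illustrative):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
)

// seal computes an authentication tag over the key-derivation context.
func seal(key, context []byte) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write(context)
	return mac.Sum(nil)
}

func main() {
	key := []byte("32byteslongsecretkeymustprovided")
	good := seal(key, []byte("bucket/object"))
	bad := seal(key, []byte("bucket/object-2")) // different object name
	fmt.Println(hmac.Equal(good, bad))          // false: unsealing must fail
}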
@@ -459,48 +290,471 @@ func TestDecryptRequest(t *testing.T) {
var decryptObjectInfoTests = []struct {
info ObjectInfo
headers http.Header
- expErr APIErrorCode
+ expErr error
}{
{
info: ObjectInfo{Size: 100},
headers: http.Header{},
- expErr: ErrNone,
+ expErr: nil,
},
{
- info: ObjectInfo{Size: 100, UserDefined: map[string]string{ServerSideEncryptionSealAlgorithm: SSESealAlgorithmDareSha256}},
- headers: http.Header{SSECustomerAlgorithm: []string{SSECustomerAlgorithmAES256}},
- expErr: ErrNone,
+ info: ObjectInfo{Size: 100, UserDefined: map[string]string{crypto.SSESealAlgorithm: SSESealAlgorithmDareSha256}},
+ headers: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}},
+ expErr: nil,
},
{
- info: ObjectInfo{Size: 0, UserDefined: map[string]string{ServerSideEncryptionSealAlgorithm: SSESealAlgorithmDareSha256}},
- headers: http.Header{SSECustomerAlgorithm: []string{SSECustomerAlgorithmAES256}},
- expErr: ErrNone,
+ info: ObjectInfo{Size: 0, UserDefined: map[string]string{crypto.SSESealAlgorithm: SSESealAlgorithmDareSha256}},
+ headers: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}},
+ expErr: nil,
},
{
- info: ObjectInfo{Size: 100, UserDefined: map[string]string{ServerSideEncryptionSealAlgorithm: SSESealAlgorithmDareSha256}},
+ info: ObjectInfo{Size: 100, UserDefined: map[string]string{crypto.SSECSealedKey: "EAAfAAAAAAD7v1hQq3PFRUHsItalxmrJqrOq6FwnbXNarxOOpb8jTWONPPKyM3Gfjkjyj6NCf+aB/VpHCLCTBA=="}},
headers: http.Header{},
- expErr: ErrSSEEncryptedObject,
+ expErr: errEncryptedObject,
},
{
info: ObjectInfo{Size: 100, UserDefined: map[string]string{}},
- headers: http.Header{SSECustomerAlgorithm: []string{SSECustomerAlgorithmAES256}},
- expErr: ErrInvalidEncryptionParameters,
+ headers: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}},
+ expErr: errInvalidEncryptionParameters,
},
{
- info: ObjectInfo{Size: 31, UserDefined: map[string]string{ServerSideEncryptionSealAlgorithm: SSESealAlgorithmDareSha256}},
- headers: http.Header{SSECustomerAlgorithm: []string{SSECustomerAlgorithmAES256}},
- expErr: ErrObjectTampered,
+ info: ObjectInfo{Size: 31, UserDefined: map[string]string{crypto.SSESealAlgorithm: SSESealAlgorithmDareSha256}},
+ headers: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}},
+ expErr: errObjectTampered,
},
}

func TestDecryptObjectInfo(t *testing.T) {
for i, test := range decryptObjectInfoTests {
- if err, encrypted := DecryptObjectInfo(&test.info, test.headers); err != test.expErr {
+ if encrypted, err := DecryptObjectInfo(&test.info, test.headers); err != test.expErr {
t.Errorf("Test %d: Decryption returned wrong error code: got %d , want %d", i, err, test.expErr)
- } else if enc := test.info.IsEncrypted(); encrypted && enc != encrypted {
+ } else if enc := crypto.IsEncrypted(test.info.UserDefined); encrypted && enc != encrypted {
t.Errorf("Test %d: Decryption thinks object is encrypted but it is not", i)
} else if !encrypted && enc != encrypted {
t.Errorf("Test %d: Decryption thinks object is not encrypted but it is", i)
}
}
}
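Reviewer note: DecryptObjectInfo now follows the idiomatic Go convention of returning the error last, and the API-level error codes (ErrNone, ErrObjectTampered, ...) are replaced by plain error values compared with !=. A self-contained sketch of that sentinel-comparison pattern, with a hypothetical stand-in for the package-private errors:

package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-in for cmd's package-private sentinel error.
var errObjectTampered = errors.New("object is tampered")

// check mimics the fixture above where a 31-byte "encrypted" object is too
// short to be valid ciphertext.
func check(size int64) (encrypted bool, err error) {
	if size == 31 {
		return false, errObjectTampered
	}
	return true, nil
}

func main() {
	if _, err := check(31); err != errObjectTampered {
		fmt.Println("unexpected:", err)
	} else {
		fmt.Println("sentinel comparison works:", err)
	}
}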
// Tests for issue reproduced when getting the right encrypted
// offset of the object.
func TestGetDecryptedRange_Issue50(t *testing.T) {
rs, err := parseRequestRangeSpec("bytes=594870256-594870263")
if err != nil {
t.Fatal(err)
}

objInfo := ObjectInfo{
Bucket: "bucket",
Name: "object",
Size: 595160760,
UserDefined: map[string]string{
crypto.SSEMultipart: "",
crypto.SSEIV: "HTexa=",
crypto.SSESealAlgorithm: "DAREv2-HMAC-SHA256",
crypto.SSECSealedKey: "IAA8PGAA==",
ReservedMetadataPrefix + "actual-size": "594870264",
"content-type": "application/octet-stream",
"etag": "166b1545b4c1535294ee0686678bea8c-2",
},
Parts: []ObjectPartInfo{
{
Number: 1,
Name: "part.1",
ETag: "etag1",
Size: 297580380,
ActualSize: 297435132,
},
{
Number: 2,
Name: "part.2",
ETag: "etag2",
Size: 297580380,
ActualSize: 297435132,
},
},
}

encOff, encLength, skipLen, seqNumber, partStart, err := objInfo.GetDecryptedRange(rs)
if err != nil {
t.Fatalf("Test: failed %s", err)
}
if encOff != 595127964 {
t.Fatalf("Test: expected %d, got %d", 595127964, encOff)
}
if encLength != 32796 {
t.Fatalf("Test: expected %d, got %d", 32796, encLength)
}
if skipLen != 32756 {
t.Fatalf("Test: expected %d, got %d", 32756, skipLen)
}
if seqNumber != 4538 {
t.Fatalf("Test: expected %d, got %d", 4538, seqNumber)
}
if partStart != 1 {
t.Fatalf("Test: expected %d, got %d", 1, partStart)
}
}
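Reviewer note: the expected values above can be checked by hand, assuming DARE packages of 64 KiB plaintext plus 32 bytes of per-package overhead (the same layout the constants in TestGetDecryptedRange below use; this sketch is an annotation, not part of the diff):

package main

import "fmt"

func main() {
	const pkg = 64 * 1024            // DARE package payload size
	part1Plain := int64(297435132)   // decrypted (actual) size of part.1
	part1Enc := int64(297580380)     // encrypted size of part.1
	start := int64(594870256)        // first requested byte, decrypted offset

	off := start - part1Plain        // offset of the read inside part.2
	sn := off / pkg                  // DARE sequence number within the part
	skip := off % pkg                // plaintext bytes to discard after decryption
	encOff := part1Enc + sn*(pkg+32) // ciphertext offset where reading starts

	fmt.Println(sn, skip, encOff) // 4538 32756 595127964
	// The 8-byte range ends on the part's last byte, so exactly one partial
	// package of 32764 plaintext bytes is read: encLength = 32764 + 32 = 32796.
}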

func TestGetDecryptedRange(t *testing.T) {
var (
pkgSz = int64(64) * humanize.KiByte
minPartSz = int64(5) * humanize.MiByte
maxPartSz = int64(5) * humanize.GiByte

getEncSize = func(s int64) int64 {
v, _ := sio.EncryptedSize(uint64(s))
return int64(v)
}
udMap = func(isMulti bool) map[string]string {
m := map[string]string{
crypto.SSESealAlgorithm: SSESealAlgorithmDareSha256,
crypto.SSEMultipart: "1",
}
if !isMulti {
delete(m, crypto.SSEMultipart)
}
return m
}
)
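// Annotation (not part of the original change): sio.EncryptedSize reflects
// the DARE on-disk format. Assuming 64 KiB plaintext packages with 32 bytes
// of header plus authentication tag each, a trailing partial package also
// costs 32 bytes, so getEncSize(1) = 33, getEncSize(pkgSz) = pkgSz+32, and
// getEncSize(pkgSz+1) = pkgSz + 32 + 33.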

// Single part object tests
var (
mkSPObj = func(s int64) ObjectInfo {
return ObjectInfo{
Size: getEncSize(s),
UserDefined: udMap(false),
}
}
)

testSP := []struct {
decSz int64
oi ObjectInfo
}{
{0, mkSPObj(0)},
{1, mkSPObj(1)},
{pkgSz - 1, mkSPObj(pkgSz - 1)},
{pkgSz, mkSPObj(pkgSz)},
{2*pkgSz - 1, mkSPObj(2*pkgSz - 1)},
{minPartSz, mkSPObj(minPartSz)},
{maxPartSz, mkSPObj(maxPartSz)},
}

for i, test := range testSP {
{
// nil range
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(nil)
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}
if skip != 0 || sn != 0 || ps != 0 || o != 0 || l != getEncSize(test.decSz) {
t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps)
}
}

if test.decSz >= 10 {
// first 10 bytes
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(&HTTPRangeSpec{false, 0, 9})
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}
var rLen = pkgSz + 32
if test.decSz < pkgSz {
rLen = test.decSz + 32
}
if skip != 0 || sn != 0 || ps != 0 || o != 0 || l != rLen {
t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps)
}
}

kb32 := int64(32) * humanize.KiByte
if test.decSz >= (64+32)*humanize.KiByte {
// Skip the first 32 KiB, and read the next 64 KiB
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(&HTTPRangeSpec{false, kb32, 3*kb32 - 1})
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}
var rLen = (pkgSz + 32) * 2
if test.decSz < 2*pkgSz {
rLen = (pkgSz + 32) + (test.decSz - pkgSz + 32)
}
if skip != kb32 || sn != 0 || ps != 0 || o != 0 || l != rLen {
t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps)
}
}

if test.decSz >= (64*2+32)*humanize.KiByte {
// Skip the first 96 KiB and read the next 64 KiB
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(&HTTPRangeSpec{false, 3 * kb32, 5*kb32 - 1})
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}
var rLen = (pkgSz + 32) * 2
if test.decSz-pkgSz < 2*pkgSz {
rLen = (pkgSz + 32) + (test.decSz - pkgSz + 32*2)
}
if skip != kb32 || sn != 1 || ps != 0 || o != pkgSz+32 || l != rLen {
t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps)
}
}

}

// Multipart object tests
var (
// make a multipart object-info given part sizes
mkMPObj = func(sizes []int64) ObjectInfo {
r := make([]ObjectPartInfo, len(sizes))
sum := int64(0)
for i, s := range sizes {
r[i].Number = i
r[i].Size = int64(getEncSize(s))
sum += r[i].Size
}
return ObjectInfo{
Size: sum,
UserDefined: udMap(true),
Parts: r,
}
}
// Simple useful utilities
repeat = func(k int64, n int) []int64 {
a := []int64{}
for i := 0; i < n; i++ {
a = append(a, k)
}
return a
}
lsum = func(s []int64) int64 {
sum := int64(0)
for _, i := range s {
if i < 0 {
return -1
}
sum += i
}
return sum
}
esum = func(oi ObjectInfo) int64 {
sum := int64(0)
for _, i := range oi.Parts {
sum += i.Size
}
return sum
}
)

s1 := []int64{5487701, 5487799, 3}
s2 := repeat(5487701, 5)
s3 := repeat(maxPartSz, 10000)
testMPs := []struct {
decSizes []int64
oi ObjectInfo
}{
{s1, mkMPObj(s1)},
{s2, mkMPObj(s2)},
{s3, mkMPObj(s3)},
}

// This function is a reference (re-)implementation of
// decrypted range computation, written solely for the purpose
// of the unit tests.
//
// `s` gives the decrypted part sizes, and the other
// parameters describe the desired read segment. When
// `isFromEnd` is true, the `skipLen` argument is ignored.
decryptedRangeRef := func(s []int64, skipLen, readLen int64, isFromEnd bool) (o, l, skip int64, sn uint32, ps int) {
oSize := lsum(s)
if isFromEnd {
skipLen = oSize - readLen
}
if skipLen < 0 || readLen < 0 || oSize < 0 || skipLen+readLen > oSize {
t.Fatalf("Impossible read specified: %d %d %d", skipLen, readLen, oSize)
}

var cumulativeSum, cumulativeEncSum int64
toRead := readLen
readStart := false
for i, v := range s {
partOffset := int64(0)
partDarePkgOffset := int64(0)
if !readStart && cumulativeSum+v > skipLen {
// Read starts at the current part
readStart = true

partOffset = skipLen - cumulativeSum

// All return values except `l` are
// calculated here.
sn = uint32(partOffset / pkgSz)
skip = partOffset % pkgSz
ps = i
o = cumulativeEncSum + int64(sn)*(pkgSz+32)

partDarePkgOffset = partOffset - skip
}
if readStart {
currentPartBytes := v - partOffset
currentPartDareBytes := v - partDarePkgOffset
if currentPartBytes < toRead {
toRead -= currentPartBytes
l += getEncSize(currentPartDareBytes)
} else {
// current part has the last
// byte required
lbPartOffset := partOffset + toRead - 1

// round up the lbPartOffset
// to the end of the
// corresponding DARE package
lbPkgEndOffset := lbPartOffset - (lbPartOffset % pkgSz) + pkgSz
if lbPkgEndOffset > v {
lbPkgEndOffset = v
}
bytesToDrop := v - lbPkgEndOffset

// Last segment to update `l`
l += getEncSize(currentPartDareBytes - bytesToDrop)
break
}
}

cumulativeSum += v
cumulativeEncSum += getEncSize(v)
}
return
}
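// Annotation (not part of the original change): as a concrete reading of the
// reference implementation, take s1 = {5487701, 5487799, 3} and a read of the
// first 10 bytes. The read begins in part 0 at package 0, so the reference
// returns o = 0, skip = 0, sn = 0, ps = 0, and l = pkgSz + 32 (one full
// encrypted package), because the decrypter must fetch the entire DARE
// package that contains the requested bytes.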

for i, test := range testMPs {
{
// nil range
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(nil)
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}
if o != 0 || l != esum(test.oi) || skip != 0 || sn != 0 || ps != 0 {
t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps)
}
}

// Skip 1 MiB and read 1 MiB (in the decrypted object)
//
// The check below ensures the object is large enough
// for the read.
if lsum(test.decSizes) >= 2*humanize.MiByte {
skipLen, readLen := int64(1)*humanize.MiByte, int64(1)*humanize.MiByte
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(&HTTPRangeSpec{false, skipLen, skipLen + readLen - 1})
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}

oRef, lRef, skipRef, snRef, psRef := decryptedRangeRef(test.decSizes, skipLen, readLen, false)
if o != oRef || l != lRef || skip != skipRef || sn != snRef || ps != psRef {
t.Errorf("Case %d: test failed: %d %d %d %d %d (Ref: %d %d %d %d %d)",
i, o, l, skip, sn, ps, oRef, lRef, skipRef, snRef, psRef)
}
}

// Read the last 6 MiB + 1 bytes of the (decrypted)
// object
//
// The check below ensures the object is large enough
// for the read.
readLen := int64(6)*humanize.MiByte + 1
if lsum(test.decSizes) >= readLen {
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(&HTTPRangeSpec{true, -readLen, -1})
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}

oRef, lRef, skipRef, snRef, psRef := decryptedRangeRef(test.decSizes, 0, readLen, true)
if o != oRef || l != lRef || skip != skipRef || sn != snRef || ps != psRef {
t.Errorf("Case %d: test failed: %d %d %d %d %d (Ref: %d %d %d %d %d)",
i, o, l, skip, sn, ps, oRef, lRef, skipRef, snRef, psRef)
}
}

}
}

var getDefaultOptsTests = []struct {
headers http.Header
copySource bool
metadata map[string]string
encryptionType encrypt.Type
err error
}{
{headers: http.Header{crypto.SSECAlgorithm: []string{"AES256"},
crypto.SSECKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
crypto.SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
copySource: false,
metadata: nil,
encryptionType: encrypt.SSEC,
err: nil}, // 0
{headers: http.Header{crypto.SSECAlgorithm: []string{"AES256"},
crypto.SSECKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
crypto.SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
copySource: true,
metadata: nil,
encryptionType: "",
err: nil}, // 1
{headers: http.Header{crypto.SSECAlgorithm: []string{"AES256"},
crypto.SSECKey: []string{"Mz"},
crypto.SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
copySource: false,
metadata: nil,
encryptionType: "",
err: crypto.ErrInvalidCustomerKey}, // 2
{headers: http.Header{crypto.SSEHeader: []string{"AES256"}},
copySource: false,
metadata: nil,
encryptionType: encrypt.S3,
err: nil}, // 3
{headers: http.Header{},
copySource: false,
metadata: map[string]string{crypto.S3SealedKey: base64.StdEncoding.EncodeToString(make([]byte, 64)),
crypto.S3KMSKeyID: "kms-key",
crypto.S3KMSSealedKey: "m-key"},
encryptionType: encrypt.S3,
err: nil}, // 4
{headers: http.Header{},
copySource: true,
metadata: map[string]string{crypto.S3SealedKey: base64.StdEncoding.EncodeToString(make([]byte, 64)),
crypto.S3KMSKeyID: "kms-key",
crypto.S3KMSSealedKey: "m-key"},
encryptionType: "",
err: nil}, // 5
{headers: http.Header{crypto.SSECopyAlgorithm: []string{"AES256"},
crypto.SSECopyKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
crypto.SSECopyKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
copySource: true,
metadata: nil,
encryptionType: encrypt.SSEC,
err: nil}, // 6
{headers: http.Header{crypto.SSECopyAlgorithm: []string{"AES256"},
crypto.SSECopyKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
crypto.SSECopyKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
copySource: false,
metadata: nil,
encryptionType: "",
err: nil}, // 7
}

func TestGetDefaultOpts(t *testing.T) {
for i, test := range getDefaultOptsTests {
opts, err := getDefaultOpts(test.headers, test.copySource, test.metadata)
if test.err != err {
t.Errorf("Case %d: expected err: %v , actual err: %v", i, test.err, err)
}
if err == nil {
if opts.ServerSideEncryption == nil && test.encryptionType != "" {
t.Errorf("Case %d: expected opts to be of %v encryption type", i, test.encryptionType)

}
if opts.ServerSideEncryption != nil && test.encryptionType != opts.ServerSideEncryption.Type() {
t.Errorf("Case %d: expected opts to have encryption type %v but was %v ", i, test.encryptionType, opts.ServerSideEncryption.Type())
}
}
}
}
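Reviewer note on the fixture pairs above: regular SSE-C headers yield encryption options only when copySource is false (cases 0 vs 1), and the X-Amz-Copy-Source-* SSE-C headers only when copySource is true (cases 6 vs 7); the mismatched combinations intentionally produce no options rather than an error. A minimal sketch of that selection logic, under the assumption that this is how getDefaultOpts uses the flag (the helper below is illustrative, not the actual implementation):

package main

import (
	"fmt"
	"net/http"
)

// pickSSECHeader returns the SSE-C key header relevant for the request.
func pickSSECHeader(h http.Header, copySource bool) string {
	if copySource {
		return h.Get("X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key")
	}
	return h.Get("X-Amz-Server-Side-Encryption-Customer-Key")
}

func main() {
	h := http.Header{}
	h.Set("X-Amz-Server-Side-Encryption-Customer-Key", "base64-key")
	fmt.Println(pickSSECHeader(h, false) != "") // true: case 0
	fmt.Println(pickSSECHeader(h, true) != "")  // false: case 1
}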

@@ -18,6 +18,8 @@ package cmd

import (
"fmt"
"os"
"strconv"
"strings"

"github.com/minio/minio-go/pkg/set"
@@ -69,10 +71,26 @@ func getSetIndexes(args []string, totalSizes []uint64) (setIndexes [][]uint64, e
return nil, errInvalidArgument
}

// isValidSetSize - checks whether given count is a valid set size for erasure coding.
isValidSetSize := func(count uint64) bool {
return (count >= setSizes[0] && count <= setSizes[len(setSizes)-1] && count%2 == 0)
}

var customSetDriveCount uint64
if v := os.Getenv("MINIO_ERASURE_SET_DRIVE_COUNT"); v != "" {
customSetDriveCount, err = strconv.ParseUint(v, 10, 64)
if err != nil {
return nil, uiErrInvalidErasureSetSize(err)
}
if !isValidSetSize(customSetDriveCount) {
return nil, uiErrInvalidErasureSetSize(nil)
}
}

setIndexes = make([][]uint64, len(totalSizes))
for _, totalSize := range totalSizes {
// Check if totalSize has minimum range up to setSize
- if totalSize < setSizes[0] {
+ if totalSize < setSizes[0] || totalSize < customSetDriveCount {
return nil, uiErrInvalidNumberOfErasureEndpoints(nil)
}
}
@@ -95,9 +113,24 @@ func getSetIndexes(args []string, totalSizes []uint64) (setIndexes [][]uint64, e
setSize = commonSize
}

- // isValidSetSize - checks whether given count is a valid set size for erasure coding.
- isValidSetSize := func(count uint64) bool {
- return (count >= setSizes[0] && count <= setSizes[len(setSizes)-1] && count%2 == 0)
+ possibleSetCounts := func(setSize uint64) (ss []uint64) {
+ for _, s := range setSizes {
+ if setSize%s == 0 {
+ ss = append(ss, s)
+ }
+ }
+ return ss
}

if customSetDriveCount > 0 {
msg := fmt.Sprintf("Invalid set drive count, leads to non-uniform distribution for the given number of disks. Possible values for custom set count are %d", possibleSetCounts(setSize))
if customSetDriveCount > setSize {
return nil, uiErrInvalidErasureSetSize(nil).Msg(msg)
}
if setSize%customSetDriveCount != 0 {
return nil, uiErrInvalidErasureSetSize(nil).Msg(msg)
}
setSize = customSetDriveCount
}

// Check whether setSize is within the supported range.
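Reviewer note: the override only succeeds when the custom count tiles the drive count evenly. A self-contained sketch of the divisibility check, assuming the supported erasure set sizes are 4, 6, 8, 10, 12, 14, and 16 (the setSizes slice referenced above, not shown in this diff):

package main

import "fmt"

func main() {
	setSizes := []uint64{4, 6, 8, 10, 12, 14, 16}
	drives := uint64(64)

	// Which values of MINIO_ERASURE_SET_DRIVE_COUNT evenly split 64 drives?
	var possible []uint64
	for _, s := range setSizes {
		if drives%s == 0 {
			possible = append(possible, s)
		}
	}
	fmt.Println(possible) // [4 8 16], e.g. count=8 yields 8 sets of 8 drives
}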

@@ -18,6 +18,7 @@ package cmd

import (
"fmt"
"os"
"reflect"
"testing"

@@ -84,6 +85,65 @@ func TestGetDivisibleSize(t *testing.T) {
}
}

// Test tests calculating set indexes with ENV override for drive count.
func TestGetSetIndexesEnvOverride(t *testing.T) {
testCases := []struct {
args []string
totalSizes []uint64
indexes [][]uint64
envOverride string
success bool
}{
{
[]string{"data{1...64}"},
[]uint64{64},
[][]uint64{{8, 8, 8, 8, 8, 8, 8, 8}},
"8",
true,
},
{
[]string{"data{1...60}"},
nil,
nil,
"8",
false,
},
{
[]string{"data{1...64}"},
nil,
nil,
"-1",
false,
},
{
[]string{"data{1...64}"},
nil,
nil,
"2",
false,
},
}

for i, testCase := range testCases {
t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) {
if err := os.Setenv("MINIO_ERASURE_SET_DRIVE_COUNT", testCase.envOverride); err != nil {
t.Fatal(err)
}
gotIndexes, err := getSetIndexes(testCase.args, testCase.totalSizes)
if err != nil && testCase.success {
t.Errorf("Expected success but failed instead %s", err)
}
if err == nil && !testCase.success {
t.Errorf("Expected failure but passed instead")
}
if !reflect.DeepEqual(testCase.indexes, gotIndexes) {
t.Errorf("Expected %v, got %v", testCase.indexes, gotIndexes)
}
os.Unsetenv("MINIO_ERASURE_SET_DRIVE_COUNT")
})
}
}
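Reviewer note: the failing cases follow from the validation added above: 8 does not evenly divide 60 drives, "-1" does not parse as an unsigned integer, and 2 is below the smallest supported set size. A quick self-contained check of the first two conditions:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	fmt.Println(60%8 != 0) // true: 8 drives per set cannot tile 60 drives

	_, err := strconv.ParseUint("-1", 10, 64)
	fmt.Println(err != nil) // true: the override must be a non-negative integer
}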

// Test tests calculating set indexes.
func TestGetSetIndexes(t *testing.T) {
testCases := []struct {
@@ -166,6 +226,17 @@ func TestGetSetIndexes(t *testing.T) {
}
}

func getHexSequences(start int, number int, paddinglen int) (seq []string) {
for i := start; i <= number; i++ {
if paddinglen == 0 {
seq = append(seq, fmt.Sprintf("%x", i))
} else {
seq = append(seq, fmt.Sprintf(fmt.Sprintf("%%0%dx", paddinglen), i))
}
}
return seq
}

func getSequences(start int, number int, paddinglen int) (seq []string) {
for i := start; i <= number; i++ {
if paddinglen == 0 {
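Reviewer note: getHexSequences mirrors getSequences but expands in base 16, which is what the new IPv6 test cases below rely on. A quick self-contained check of its output for start=1, number=10, paddinglen=3, using the same dynamically built format string:

package main

import "fmt"

func main() {
	var seq []string
	for i := 1; i <= 10; i++ {
		// fmt.Sprintf("%%0%dx", 3) builds "%03x" for paddinglen == 3
		seq = append(seq, fmt.Sprintf(fmt.Sprintf("%%0%dx", 3), i))
	}
	fmt.Println(seq) // [001 002 003 004 005 006 007 008 009 00a]
}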
@@ -227,9 +298,9 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
- "/export/set",
- "",
- getSequences(1, 64, 0),
+ Prefix: "/export/set",
+ Suffix: "",
+ Seq: getSequences(1, 64, 0),
},
},
},
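Reviewer note: this and the following hunks switch the fixtures from positional to keyed struct literals. Beyond readability, keyed literals keep these tables compiling if ellipses.Pattern gains or reorders fields. A self-contained illustration with a stand-in struct (not the real ellipses package):

package main

import "fmt"

type pattern struct { // stand-in for ellipses.Pattern
	Prefix string
	Suffix string
	Seq    []string
}

func main() {
	// The positional form pattern{"/export/set", "", seq} silently breaks if
	// a field is inserted before Prefix; the keyed form compiles either way.
	p := pattern{Prefix: "/export/set", Suffix: "", Seq: []string{"1", "2"}}
	fmt.Println(p.Prefix, len(p.Seq))
}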
@@ -245,14 +316,14 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
- "",
- "",
- getSequences(1, 64, 0),
+ Prefix: "",
+ Suffix: "",
+ Seq: getSequences(1, 64, 0),
},
{
- "http://minio",
- "/export/set",
- getSequences(2, 3, 0),
+ Prefix: "http://minio",
+ Suffix: "/export/set",
+ Seq: getSequences(2, 3, 0),
},
},
},
@@ -268,9 +339,9 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
- "http://minio",
- ".mydomain.net/data",
- getSequences(1, 64, 0),
+ Prefix: "http://minio",
+ Suffix: ".mydomain.net/data",
+ Seq: getSequences(1, 64, 0),
},
},
},
@@ -285,14 +356,14 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
- "",
- "/data",
- getSequences(1, 16, 0),
+ Prefix: "",
+ Suffix: "/data",
+ Seq: getSequences(1, 16, 0),
},
{
- "http://rack",
- ".mydomain.minio",
- getSequences(1, 4, 0),
+ Prefix: "http://rack",
+ Suffix: ".mydomain.minio",
+ Seq: getSequences(1, 4, 0),
},
},
},
@@ -308,14 +379,14 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
- "",
- "",
- getSequences(0, 1, 0),
+ Prefix: "",
+ Suffix: "",
+ Seq: getSequences(0, 1, 0),
},
{
- "http://minio",
- ".mydomain.net/data",
- getSequences(0, 15, 0),
+ Prefix: "http://minio",
+ Suffix: ".mydomain.net/data",
+ Seq: getSequences(0, 15, 0),
},
},
},
@@ -331,9 +402,9 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
- "http://server1/data",
- "",
- getSequences(1, 32, 0),
+ Prefix: "http://server1/data",
+ Suffix: "",
+ Seq: getSequences(1, 32, 0),
},
},
},
@@ -349,9 +420,9 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
- "http://server1/data",
- "",
- getSequences(1, 32, 2),
+ Prefix: "http://server1/data",
+ Suffix: "",
+ Seq: getSequences(1, 32, 2),
},
},
},
@@ -367,19 +438,19 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
- "",
- "",
- getSequences(1, 2, 0),
+ Prefix: "",
+ Suffix: "",
+ Seq: getSequences(1, 2, 0),
},
{
- "",
- "/test",
- getSequences(1, 64, 0),
+ Prefix: "",
+ Suffix: "/test",
+ Seq: getSequences(1, 64, 0),
},
{
- "http://minio",
- "/export/set",
- getSequences(2, 3, 0),
+ Prefix: "http://minio",
+ Suffix: "/export/set",
+ Seq: getSequences(2, 3, 0),
},
},
},
@@ -396,14 +467,60 @@ func TestParseEndpointSet(t *testing.T) {
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
- "",
- "",
- getSequences(1, 10, 0),
+ Prefix: "",
+ Suffix: "",
+ Seq: getSequences(1, 10, 0),
},
{
- "/export",
- "/disk",
- getSequences(1, 10, 0),
+ Prefix: "/export",
+ Suffix: "/disk",
+ Seq: getSequences(1, 10, 0),
},
},
},
nil,
[][]uint64{{10, 10, 10, 10, 10, 10, 10, 10, 10, 10}},
},
true,
},
// IPv6 ellipses with hexadecimal expansion
{
"http://[2001:3984:3989::{1...a}]/disk{1...10}",
endpointSet{
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
Prefix: "",
Suffix: "",
Seq: getSequences(1, 10, 0),
},
{
Prefix: "http://[2001:3984:3989::",
Suffix: "]/disk",
Seq: getHexSequences(1, 10, 0),
},
},
},
nil,
[][]uint64{{10, 10, 10, 10, 10, 10, 10, 10, 10, 10}},
},
true,
},
// IPv6 ellipses with hexadecimal expansion with 3 position numerics.
{
"http://[2001:3984:3989::{001...00a}]/disk{1...10}",
endpointSet{
[]ellipses.ArgPattern{
[]ellipses.Pattern{
{
Prefix: "",
Suffix: "",
Seq: getSequences(1, 10, 0),
},
{
Prefix: "http://[2001:3984:3989::",
Suffix: "]/disk",
Seq: getHexSequences(1, 10, 3),
},
},
},
Some files were not shown because too many files have changed in this diff.