Compare commits
RELEASE.20... → RELEASE.20...
539 commits
Commit SHAs (539):

817269475f  2d168b532b  fd4e15c116  3dfbe0f68c  30135eed86  e4081aee62  df418a2783
9a65f6dc97  f04f8bbc78  ea6d61ab1f  6f08edfb36  e9fdea05c6  e005910051  de2c106386
32a6dd1dd6  432aec73d9  36dae04671  9dc9f03c02  b18c0478e7  2d9860e875  d3553f8dfc
e1ae90c12b  34e7259f95  d0015b4d66  3467460456  64b5701971  fad59da29d  91c839ad28
2786055df4  0a28c28a8c  526546d588  2053b3414f  ce870466ff  964e354d06  8ee8ad777c
82af0be1aa  e8c18bc145  bd25f31100  ee7dcc2903  55ef51a99d  dc2348daa5  042d7f25e4
8c1b649b2d  f03ccec912  0bb65f84bb  8e0910ab3e  5353edcc38  3265112d04  4fdacb8b14
267f183fc8  3d22a9d84f  51ec61ee94  74c2048ea9  730ac5381c  6dd8a83c5a  1a7e6d4768
98c950aacd  94c52e3816  8766c5eb22  e0d22359e7  6dd13e68c2  633001c8ba  e23a42305c
63d2583e91  a2f66abbe8  b28661b673  e8791ae274  309975d477  6571641735  8757c963ba
9a71f2fdfa  9c26fe47b0  f3f47d8cd3  de1d39e436  ed1275a063  a7d407fa42  4e6e05f8e0
b0deea27df  e98d89274f  c59206bcd3  7f2d439baa  5a80cbec2a  2d19011a1d  e82dcd195c
fcb56d864c  75cd4201b0  f24c017e9a  b5280ba243  2a0e4b6f58  1898961ce3  236796ebd6
4e4f855b30  2db22deb93  fb8d0d7cf7  a536cf5dc0  b9b68e9331  632022971b  def04f01cf
bc67410548  7881791a91  d2f8f8c7ee  8c32311b80  9bb88e610e  d1e41695fe  2aeb3fbe86
99b843a64e  4f31a9a33b  65ddff8899  e7c902bbbc  5a5895203b  7da0336ac8  3be616de3f
c5bf22fd90  7c9f934875  b6f9b24b30  13cb814a0e  d264d2c899  bebaff269c  52b159b1db
324834e4da  c2ed1347d9  50f6f9fe58  48cb0ea34b  6f7c99a333  3498f5b0ec  21d8c0fd13
79b9a9ce46  b9b353db4b  11a9b317a3  9af7d627ac  76d9d54603  4c7c571875  313ba74b09
3e124315c8  78a0fd951e  40852801ea  f6980c4630  8fcc787cba  4e6d3c093f  61145361fd
950b4ad9af  6add646130  20e61fb362  18ced1102c  d6af3c1237  5549a44566  e7af31c2ff
e7971b1d55  26120d7838  bef7c01c58  6a8ccc5925  d85199e9de  8608a84c23  b7226f4c82
f930ffe9e2  b50a245208  45bb11e020  b65cf281fd  f781548b0c  25ee8e74f7  c975d2cc7e
ea66528739  e1164103d4  83fe70f710  12a6523fb2  dba61867e8  dd092f6c2b  2a810c7da2
dd8c2aa5c6  9e3fce441e  d4265f9a13  2fc024e880  eddf468aef  b0d04b9a81  a9de303d8b
69bd6df464  bfb505aa8e  d732b1ff9d  ef517bd0da  32d837cf88  7b579caf68  272b8003d6
1c24c93f73  712abc7958  5f6d717b7a  7e1661f4fa  f9779b24ad  f1f23f6f11  cf26c937e4
f19f957668  d6572879a8  b6ab8f50fa  c82acc599a  2447bb58dd  a55a298e00  2929c1832d
aa2d8583ad  df2d75a2a3  b24b320807  c872c1f1dc  a40610d331  38978eb2aa  ca7c3a3278
d58fc68137  71c66464c1  bf414068a3  88959ce600  572719872d  bdea19b583  eb1f9c9916
d07fb41fe8  a9cda850ca  bef0318c36  3f19ea98bb  c96073f985  d2f240c791  6491dfbbd6
d9cfa5fcd3  89b14639a9  3f744c0361  9ed7fb4916  4280e68de3  f162d7bd97  36990aeafd
9fe51e392b  7e879a45d5  1c911c5f40  bd8dc17b7a  8491a29ec3  81d21850ec  c6ec3fdfba
88c3dd49c6  6869f6d9dd  3f643acb99  ea73accefd  555d54371c  bab4c90c45  a2fc0b14d6
bf66e9a529  fde8c38638  e6252dee5a  ecb042aa1c  e29009d347  9631d65552  7b7be66fa1
b99aaab42e  32bd1b31e9  f287b15e71  586466584f  c0b4bf0a3e  acf46cc3b5  06ef8248c3
7c2ae4eaf7  989d7af9ac  8a6c3aa3cd  62b560510b  0edfb32621  7e0f1eb8b5  b43e8337b1
30040fba45  44cf9ac62f  3b55357045  18d9a20ff6  6590aba6d2  2e81f27d27  ae3c05aa37
cef044178c  c998d1ac8c  3457e504cf  7f0cca075a  088c595e01  26b4b466df  fdf691fdcc
88c8c2d6cd  ef585037a0  362ebdcbab  b251454dd6  21c8693d9c  1e7e5e297c  c7f180ffa9
b8bd8d6a03  baec331e84  557f382477  23b166b318  81a481e098  5b3090dffc  3ef3fefd54
28e25eac78  b0c9ae7490  143e7fe300  f09e7ca764  fae284d6b9  83d8e01c81  110458cd10
e3eec89d24  54ae364def  16a100b597  cbc5d78a09  d8a2975a68  66d911653f  ca6f795504
2af0f11731  c3408f4f04  b92c324254  81bee93b8d  670f9788e3  f187a16962  e031f2b614
c2b7b82ef4  02ad9d6072  ea9408ccbb  307765591d  5d859b2178  223967fd32  274b35154c
c05ced08bb  c7722fbb1b  f163bed40d  b4772849f9  839a758a36  aebfceeafb  83d7ec09c1
8c29f69b00  ce9d36d954  5c765bc63e  6c7c6bec91  ed703c065d  387584356f  1111419d4a
20378821cf  ce1bfa6de8  6c26227081  aa4e2b1542  1e5ac39ff3  ec2295c3dc  8cf7b88cc5
48bfebe442  df60b3c733  584cb61bb8  c1798cc89a  3c8fabd116  36e51d0cee  7d0645fb3a
7c339e248a  b62ed5dc90  f0641a0406  3d060f8b64  052a7b8eec  9531cddb06  6fe9a613c0
b729a4e83c  a0683d3c1f  66fda7a37f  a63bc9254d  14fa0097b0  d99c397746  e3777b1dd9
63c03758e6  ce419c9835  0c2b708484  166e998788  267a0a3dfa  985fd7d4e7  5479a6e33e
fb4186f6b9  7571582000  12b4971b70  5c0b98abf0  30d4a2cf53  92bc7caf7a  19202bae81
e7a4512a90  ff822e64ca  7cb87f863e  67d8396af4  8b0cc376f4  9e5c4df106  fd8749f42a
5c13765168  3099af70a3  fd1b8491db  1961f2ef54  631c78e655  e0f8b767ba  d0d015361c
81b7e5c7a8  9e32cc283f  1126410e62  2c46214291  d13bd5b9b5  c8c70a3750  882a1a1ccc
8690d62146  85117d554f  4487f70f08  fb27388101  5e7ccc983d  72fa2b4537  5399d91965
53a0bbeb5b  384a862940  029f52880b  5b05df215a  a13cd7b7c4  d524924b80  e6d740ce09
cea4f82586  1d6ce115da  06d2dfa31c  d547873b17  01721a840a  52f6d5aafc  2211a5f1b8
1ffa6adcd4  add57a6938  dafa5073cb  65e05a06fb  5c9354894b  b01e69e08f  8601f29d95
0aee722e3f  beb6d40ce6  19db921555  68b9e9e7e7  2d84b02bc4  8b2801bd46  7d7e21aebb
bf14e5ce1b  d531080b7e  a6b8a5487a  6c0d53a1c5  9f14433cbd  50a817e3d3  5a4a57700b
3de5a3157f  50dec08002  e71ef905f9  3d197c1449  65de2d68c0  1103ad2d08  eab947cf42
0fe9e95250  c7946ab9ab  f5df3b4795  f26325c988  7c14cdb60e  0e02328c98  380524ae27
0286e61aee  ff29aed05d  64f2c61813  525c04fd07  efac90461a  71979376b5  5a1ae862a7
6df20734f9  98bce72295  2f1756489e  9719640e34  ce02ab613d  37de2dbd3b  a82500f162
2dede2fdc2  13fbb96736  eabfcea34e  a078703214  bd2b22572f  5f69f04909  a1a426e523
a091b1a3ee  556a51120c  eb391a53c1  e17e09ea3c  76c423392a  8e6d756e3a  a7c9058375
b16e33bcf5  197af49c99  264cc4020f  df88421087  dbd89bbae3  224a272cf2  ad86454580
644c2ce326  2debe77586  20480ba3f7  4f52d22c36  cbe8df198e  157ed65c35  869018ad14
5acc2a6db1  2cd14f567c  f1be356cc6  76ddf4d32f  7b91bd71fe  36ab615518  963a70053b
9c5e971a58  b1c9eb0e01  b8f4f26cf6  43cc0096fa  e8a008f5b5  758a80e39b  3ec4738955
6c93c60424  0c9f4c9092  289d6ce1d7  2a12e694f3  db26d3c9e2  914c76a801  a1ef90be52
7c4a41b933  2aa18cafc6  adf7340394  b11a8eb3f4  15771ebe8d  44865596db  c9bc7e47b9
be1700f595  42c5b64e4e  40ed0d1f5d  b181a693fb  4ddc222f46  c310cbbe89  0ef0d7e685
c62813c887  1da362538b  e40a5e05e1  b0b0fb4c8d  726e75611e  317e648c0d  80b3e9cb03
6c85706c24  d7ced9a8b5  360f3f9335  25f9b0bc3b  a5453c307f  92a6676a2f  f53d511798
.github/ISSUE_TEMPLATE.md (vendored, 4 changes)
@@ -24,6 +24,10 @@
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->

## Regression
<!-- Is this issue a regression? (Yes / No) -->
<!-- If Yes, optionally please include minio version or commit id or PR# that caused this regression, if you have these details. -->

## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version used (`minio version`):
.github/PULL_REQUEST_TEMPLATE.md (vendored, 4 changes)
@@ -7,6 +7,10 @@
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. -->

## Regression
<!-- Is this PR fixing a regression? (Yes / No) -->
<!-- If Yes, optionally please include minio version or commit id or PR# that caused this regression, if you have these details. -->

## How Has This Been Tested?
<!--- Please describe in detail how you tested your changes. -->
<!--- Include details of your testing environment, and the tests you ran to -->
.gitignore (vendored, 1 change)
@@ -22,3 +22,4 @@ parts/
prime/
stage/
.sia_temp/
config.json
.travis.yml (64 changes)
@@ -1,34 +1,50 @@
go_import_path: github.com/minio/minio
sudo: required

services:
  - docker

dist: trusty

language: go

os:
  - linux
# this ensures PRs based on a local branch are not built twice
# the downside is that a PR targeting a different branch is not built
# but as a workaround you can add the branch to this list
branches:
  only:
    - master

env:
  - ARCH=x86_64
matrix:
  include:
    - os: linux
      dist: trusty
      sudo: required
      env:
        - ARCH=x86_64
        - CGO_ENABLED=0
      go: 1.11.4
      script:
        - make
        - diff -au <(gofmt -s -d cmd) <(printf "")
        - diff -au <(gofmt -s -d pkg) <(printf "")
        - for d in $(go list ./... | grep -v browser); do CGO_ENABLED=1 go test -v -race --timeout 15m "$d"; done
        - make verifiers
        - make crosscompile
        - make verify
        - make coverage
        - cd browser && yarn && yarn test && cd ..
    - os: windows
      env:
        - ARCH=x86_64
        - CGO_ENABLED=0
      go: 1.11.4
      script:
        - go build --ldflags="$(go run buildscripts/gen-ldflags.go)" -o %GOPATH%\bin\minio.exe
        - for d in $(go list ./... | grep -v browser); do CGO_ENABLED=1 go test -v -race --timeout 20m "$d"; done
        - bash buildscripts/go-coverage.sh

before_script:
  # Add an IPv6 config - see the corresponding Travis issue
  # https://github.com/travis-ci/travis-ci/issues/8361
  - if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6'; fi

before_install:
  - nvm install stable

script:
  ## Run all the tests
  - make
  - diff -au <(gofmt -s -d cmd) <(printf "")
  - diff -au <(gofmt -s -d pkg) <(printf "")
  - make test GOFLAGS="-timeout 15m -race -v"
  - make coverage
  - node --version
  - cd browser && yarn && yarn test && cd ..
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then nvm install stable ; fi

after_success:
  - bash <(curl -s https://codecov.io/bash)

go:
  - '1.10.1'
@@ -68,4 +68,4 @@ To remove a dependency
- Run `make pkg-remove PKG=foo/bar` from top-level directory

### What are the coding guidelines for Minio?
``Minio`` is fully conformant with Golang style. Refer: [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project. If you observe offending code, please feel free to send a pull request or ping us on [Slack](slack.minio.io).
``Minio`` is fully conformant with Golang style. Refer: [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project. If you observe offending code, please feel free to send a pull request or ping us on [Slack](https://slack.minio.io).
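For concreteness, a hedged sketch of the dependency workflow described above; `foo/bar` is the placeholder package path from the text, and `pkg-add` is the companion target visible in the Makefile diff further down.

```sh
# Add a vendored dependency, then remove it again (package path is illustrative).
make pkg-add PKG=foo/bar
make pkg-remove PKG=foo/bar
```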
Dockerfile (28 changes)
@@ -1,28 +1,32 @@
FROM golang:1.10.1-alpine3.7
FROM golang:1.11.4-alpine3.7

LABEL maintainer="Minio Inc <dev@minio.io>"

ENV GOPATH /go
ENV PATH $PATH:$GOPATH/bin
ENV CGO_ENABLED 0

WORKDIR /go/src/github.com/minio/

RUN \
    apk add --no-cache git && \
    go get -v -d github.com/minio/minio && \
    cd /go/src/github.com/minio/minio && \
    go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"

FROM alpine:3.7

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key

WORKDIR /go/src/github.com/minio/
EXPOSE 9000

COPY --from=0 /go/bin/minio /usr/bin/minio
COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck.sh /usr/bin/

RUN \
    apk add --no-cache ca-certificates curl && \
    apk add --no-cache --virtual .build-deps git && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    go get -v -d github.com/minio/minio && \
    cd /go/src/github.com/minio/minio && \
    go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)" && \
    rm -rf /go/pkg /go/src /usr/local/go && apk del .build-deps

EXPOSE 9000
    apk add --no-cache ca-certificates 'curl>7.61.0' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
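As a usage sketch, building and running the multi-stage image above might look like the following; the `my-minio` tag is illustrative, while `server /data` and the `-it` note come from the README quickstart further down.

```sh
# Build the two-stage image from the repository root (tag is illustrative).
docker build -t my-minio .
# Run interactively so the autogenerated keys are displayed (see README note).
docker run -it -p 9000:9000 my-minio server /data
```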
@@ -1,27 +1,20 @@
FROM golang:1.10.1-alpine3.7
FROM alpine:3.7

LABEL maintainer="Minio Inc <dev@minio.io>"

ENV GOPATH /go
ENV PATH $PATH:$GOPATH/bin
ENV CGO_ENABLED 0
COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck.sh /usr/bin/
COPY minio /usr/bin/

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key

WORKDIR /go/src/github.com/minio/

COPY dockerscripts/docker-entrypoint.sh dockerscripts/healthcheck.sh /usr/bin/

COPY . /go/src/github.com/minio/minio

RUN \
    apk add --no-cache ca-certificates curl && \
    apk add --no-cache --virtual .build-deps git && \
RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    cd /go/src/github.com/minio/minio && \
    go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)" && \
    rm -rf /go/pkg /go/src /usr/local/go && apk del .build-deps
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/healthcheck.sh

EXPOSE 9000
@@ -9,8 +9,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key

RUN \
    apk add --no-cache ca-certificates && \
    apk add --no-cache --virtual .build-deps curl && \
    apk add --no-cache ca-certificates 'curl>7.61.0' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl https://dl.minio.io/server/minio/release/linux-amd64/minio > /usr/bin/minio && \
    chmod +x /usr/bin/minio && \
Dockerfile.simpleci (new file, 49 lines)
@@ -0,0 +1,49 @@
#-------------------------------------------------------------
# Stage 1: Build and Unit tests
#-------------------------------------------------------------
FROM golang:1.11.4

COPY . /go/src/github.com/minio/minio
WORKDIR /go/src/github.com/minio/minio

RUN apt-get update && apt-get install -y jq
RUN make
RUN bash -c 'diff -au <(gofmt -s -d cmd) <(printf "")'
RUN bash -c 'diff -au <(gofmt -s -d pkg) <(printf "")'
RUN for d in $(go list ./... | grep -v browser); do go test -v -race --timeout 15m "$d"; done
RUN make verify
RUN make coverage || true

#-------------------------------------------------------------
# Stage 2: Test Frontend
#-------------------------------------------------------------
FROM node:10.15-stretch-slim

COPY browser /minio/browser
WORKDIR /minio/browser

RUN yarn
RUN yarn test

#-------------------------------------------------------------
# Stage 3: Run Gateway Tests
#-------------------------------------------------------------

FROM ubuntu:16.04

COPY --from=0 /go/src/github.com/minio/minio/minio ./minio
COPY buildscripts/gateway-tests.sh ./gateway-tests.sh
RUN apt-get update && apt-get install -y git wget jq curl dnsmasq

RUN wget https://dl.google.com/go/go1.11.4.linux-amd64.tar.gz && \
    tar -C /usr/local -xzf go1.11.4.linux-amd64.tar.gz

ENV DEBIAN_FRONTEND noninteractive
ENV LANG C.UTF-8
ENV GOROOT /usr/local/go

RUN mkdir -p /go
ENV GOPATH /go
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH

RUN ./gateway-tests.sh
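A plausible local invocation of this CI pipeline, assuming Docker is available; since every check is a RUN step, any failing test aborts the image build. The tag is illustrative.

```sh
# Run all three CI stages locally; a failing RUN step fails the build.
docker build -f Dockerfile.simpleci -t minio-simpleci .
```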
Makefile (22 changes)
@@ -13,18 +13,21 @@ checks:
	@(env bash $(PWD)/buildscripts/checkgopath.sh)

getdeps:
	@echo "Installing golint" && go get -u github.com/golang/lint/golint
	@echo "Installing golint" && go get -u golang.org/x/lint/golint
	@echo "Installing gocyclo" && go get -u github.com/fzipp/gocyclo
	@echo "Installing deadcode" && go get -u github.com/remyoudompheng/go-misc/deadcode
	@echo "Installing misspell" && go get -u github.com/client9/misspell/cmd/misspell
	@echo "Installing ineffassign" && go get -u github.com/gordonklaus/ineffassign

crosscompile:
	@(env bash $(PWD)/buildscripts/cross-compile.sh)

verifiers: getdeps vet fmt lint cyclo deadcode spelling

vet:
	@echo "Running $@"
	@go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult cmd
	@go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult pkg
	@go tool vet cmd
	@go tool vet pkg

fmt:
	@echo "Running $@"
@@ -42,8 +45,8 @@ ineffassign:

cyclo:
	@echo "Running $@"
	@${GOPATH}/bin/gocyclo -over 100 cmd
	@${GOPATH}/bin/gocyclo -over 100 pkg
	@${GOPATH}/bin/gocyclo -over 200 cmd
	@${GOPATH}/bin/gocyclo -over 200 pkg

deadcode:
	@echo "Running $@"
@@ -60,7 +63,9 @@ spelling:
check: test
test: verifiers build
	@echo "Running unit tests"
	@go test $(GOFLAGS) -tags kqueue ./...
	@CGO_ENABLED=0 go test -tags kqueue ./...

verify: build
	@echo "Verifying build"
	@(env bash $(PWD)/buildscripts/verify-build.sh)

@@ -71,7 +76,10 @@ coverage: build
# Builds minio locally.
build: checks
	@echo "Building minio binary to './minio'"
	@CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio
	@GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio

docker: build
	@docker build -t $(TAG) . -f Dockerfile.dev

pkg-add:
	@echo "Adding new package $(PKG)"
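Putting these targets together, a local check might run as follows; the GOFLAGS value mirrors the `.travis.yml` entry above, and the TAG value is illustrative.

```sh
make              # 'build': CGO disabled, kqueue tag, binary at ./minio
make verifiers    # getdeps + vet, fmt, lint, cyclo, deadcode, spelling
make test GOFLAGS="-timeout 15m -race -v"   # unit tests, as CI invokes them
make docker TAG=minio/minio:dev             # image built from Dockerfile.dev
```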
README.md (11 changes)
@@ -17,7 +17,7 @@ docker run -p 9000:9000 minio/minio server /data
docker pull minio/minio:edge
docker run -p 9000:9000 minio/minio:edge server /data
```
Please visit Minio Docker quickstart guide for more [here](https://docs.minio.io/docs/minio-docker-quickstart-guide)
Note: Docker will not display the autogenerated keys unless you start the container with the `-it` (interactive TTY) argument. Generally, it is not recommended to use autogenerated keys with containers. Please visit Minio Docker quickstart guide for more information [here](https://docs.minio.io/docs/minio-docker-quickstart-guide)

## macOS
### Homebrew
@@ -53,6 +53,15 @@ chmod +x minio
./minio server /data
```

| Platform | Architecture | URL |
|---|---|---|
| GNU/Linux | ppc64le | https://dl.minio.io/server/minio/release/linux-ppc64le/minio |
```sh
wget https://dl.minio.io/server/minio/release/linux-ppc64le/minio
chmod +x minio
./minio server /data
```

## Microsoft Windows
### Binary Download
| Platform | Architecture | URL |
appveyor.yml (55 changes, file removed)
@@ -1,55 +0,0 @@
# version format
version: "{build}"

# Operating system (build VM template)
os: Windows Server 2012 R2

# Platform.
platform: x64

clone_folder: c:\gopath\src\github.com\minio\minio

# Environment variables
environment:
  GOPATH: c:\gopath
  GOROOT: c:\go

# scripts that run after cloning repository
install:
  - set PATH=%GOPATH%\bin;%GOROOT%\bin;%PATH%
  - go version
  - go env
  - python --version

# To run your custom scripts instead of automatic MSBuild
build_script:
  # Compile
  # We need to disable firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648
  - ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)'
  - appveyor AddCompilationMessage "Starting Compile"
  - cd c:\gopath\src\github.com\minio\minio
  - go run buildscripts/gen-ldflags.go > temp.txt
  - set /p BUILD_LDFLAGS=<temp.txt
  - go build -ldflags="%BUILD_LDFLAGS%" -o %GOPATH%\bin\minio.exe
  - appveyor AddCompilationMessage "Compile Success"

# To run your custom scripts instead of automatic tests
test_script:
  # Unit tests
  - ps: Add-AppveyorTest "Unit Tests" -Outcome Running
  - mkdir build\coverage
  - for /f "" %%G in ('go list github.com/minio/minio/... ^| find /i /v "browser/"') do ( go test -v -timeout 20m -race %%G )
  - go test -v -timeout 20m -coverprofile=build\coverage\coverage.txt -covermode=atomic github.com/minio/minio/cmd
  - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed

after_test:
  - go tool cover -html=build\coverage\coverage.txt -o build\coverage\coverage.html
  - ps: Push-AppveyorArtifact build\coverage\coverage.txt
  - ps: Push-AppveyorArtifact build\coverage\coverage.html
  # Upload coverage report.
  - "SET PATH=C:\\Python34;C:\\Python34\\Scripts;%PATH%"
  - pip install codecov
  - codecov -X gcov -f "build\coverage\coverage.txt"

# to disable deployment
deploy: off
@@ -5,13 +5,13 @@
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Minio Browser</title>
<link rel="stylesheet" href="loader.css" type="text/css">
<link rel="stylesheet" href="/minio/loader.css" type="text/css">
</head>

<body>
<div class="page-load">
  <div class="pl-inner">
    <img src="logo.svg" alt="">
    <img src="/minio/logo.svg" alt="">
  </div>
</div>
<div id="root"></div>
@@ -51,6 +51,6 @@
<![endif]-->

<script>currentUiVersion = 'MINIO_UI_VERSION'</script>
<script src="index_bundle.js"></script>
<script src="/minio/index_bundle.js"></script>
</body>
</html>
@@ -22,7 +22,7 @@ export let alertId = 0
export const set = alert => {
  const id = alertId++
  return (dispatch, getState) => {
    if (alert.type !== "danger") {
    if (alert.type !== "danger" || alert.autoClear) {
      setTimeout(() => {
        dispatch({
          type: CLEAR,
@@ -67,8 +67,12 @@ const mapDispatchToProps = dispatch => {
  return {
    fetchBuckets: () => dispatch(actionsBuckets.fetchBuckets()),
    setBucketList: buckets => dispatch(actionsBuckets.setList(buckets)),
    selectBucket: bucket => dispatch(actionsBuckets.selectBucket(bucket))
    selectBucket: (bucket, prefix) =>
      dispatch(actionsBuckets.selectBucket(bucket, prefix))
  }
}

export default connect(mapStateToProps, mapDispatchToProps)(BucketList)
export default connect(
  mapStateToProps,
  mapDispatchToProps
)(BucketList)
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

import { READ_ONLY, WRITE_ONLY, READ_WRITE } from '../constants'
import { READ_ONLY, WRITE_ONLY, READ_WRITE, NONE } from '../constants'

import React from "react"
import { connect } from "react-redux"
@@ -42,37 +42,40 @@ export class Policy extends React.Component {
render() {
  const {policy, prefix} = this.props
  let newPrefix = prefix

  if (newPrefix === '')
    newPrefix = '*'

  return (
    <div className="pmb-list">
      <div className="pmbl-item">
        { newPrefix }
  if (policy === NONE) {
    return <noscript />
  } else {
    return (
      <div className="pmb-list">
        <div className="pmbl-item">
          { newPrefix }
        </div>
        <div className="pmbl-item">
          <select className="form-control"
            disabled
            value={ policy }>
            <option value={ READ_ONLY }>
              Read Only
            </option>
            <option value={ WRITE_ONLY }>
              Write Only
            </option>
            <option value={ READ_WRITE }>
              Read and Write
            </option>
          </select>
        </div>
        <div className="pmbl-item">
          <button className="btn btn-block btn-danger" onClick={ this.removePolicy.bind(this) }>
            Remove
          </button>
        </div>
      </div>
      <div className="pmbl-item">
        <select className="form-control"
          disabled
          value={ policy }>
          <option value={ READ_ONLY }>
            Read Only
          </option>
          <option value={ WRITE_ONLY }>
            Write Only
          </option>
          <option value={ READ_WRITE }>
            Read and Write
          </option>
        </select>
      </div>
      <div className="pmbl-item">
        <button className="btn btn-block btn-danger" onClick={ this.removePolicy.bind(this) }>
          Remove
        </button>
      </div>
    </div>
  )
    )
  }
}
}
}
@@ -17,7 +17,7 @@
import React from "react"
import { shallow, mount } from "enzyme"
import { Policy } from "../Policy"
import { READ_ONLY, WRITE_ONLY, READ_WRITE } from "../../constants"
import { READ_ONLY, WRITE_ONLY, READ_WRITE, NONE } from "../../constants"
import web from "../../web"

jest.mock("../../web", () => ({
@@ -31,6 +31,11 @@ describe("Policy", () => {
    shallow(<Policy currentBucket={"bucket"} prefix={"foo"} policy={READ_ONLY} />)
  })

  it("should not render when policy is listed as 'none'", () => {
    const wrapper = shallow(<Policy currentBucket={"bucket"} prefix={"foo"} policy={NONE} />)
    expect(wrapper.find(".pmb-list").length).toBe(0)
  })

  it("should call web.setBucketPolicy and fetchPolicies on submit", () => {
    const fetchPolicies = jest.fn()
    const wrapper = shallow(
@@ -23,6 +23,7 @@ export const minioBrowserPrefix = p.slice(0, p.indexOf("/", 1))
export const READ_ONLY = "readonly"
export const WRITE_ONLY = "writeonly"
export const READ_WRITE = "readwrite"
export const NONE = "none"

export const SHARE_OBJECT_EXPIRY_DAYS = 5
export const SHARE_OBJECT_EXPIRY_HOURS = 0
|
||||
})
|
||||
}
|
||||
render() {
|
||||
const { object, showShareObjectModal } = this.props
|
||||
const { object, showShareObjectModal, shareObjectName } = this.props
|
||||
return (
|
||||
<Dropdown id={`obj-actions-${object.name}`}>
|
||||
<Dropdown.Toggle noCaret className="fia-toggle" />
|
||||
@@ -77,7 +77,8 @@ export class ObjectActions extends React.Component {
|
||||
<i className="fa fa-trash" />
|
||||
</a>
|
||||
</Dropdown.Menu>
|
||||
{showShareObjectModal && <ShareObjectModal object={object} />}
|
||||
{(showShareObjectModal && shareObjectName === object.name) &&
|
||||
<ShareObjectModal object={object} />}
|
||||
{this.state.showDeleteConfirmation && (
|
||||
<DeleteObjectConfirmModal
|
||||
deleteObject={this.deleteObject.bind(this)}
|
||||
@@ -92,7 +93,8 @@ export class ObjectActions extends React.Component {
|
||||
const mapStateToProps = (state, ownProps) => {
|
||||
return {
|
||||
object: ownProps.object,
|
||||
showShareObjectModal: state.objects.shareObject.show
|
||||
showShareObjectModal: state.objects.shareObject.show,
|
||||
shareObjectName: state.objects.shareObject.object
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -88,8 +88,21 @@ describe("ObjectActions", () => {
        object={{ name: "obj1" }}
        currentPrefix={"pre1/"}
        showShareObjectModal={true}
        shareObjectName={"obj1"}
      />
    )
    expect(wrapper.find("Connect(ShareObjectModal)").length).toBe(1)
  })

  it("shouldn't render ShareObjectModal when the names of the objects don't match", () => {
    const wrapper = shallow(
      <ObjectActions
        object={{ name: "obj1" }}
        currentPrefix={"pre1/"}
        showShareObjectModal={true}
        shareObjectName={"obj2"}
      />
    )
    expect(wrapper.find("Connect(ShareObjectModal)").length).toBe(0)
  })
})
@@ -19,16 +19,29 @@ import thunk from "redux-thunk"
import * as actionsObjects from "../actions"
import * as alertActions from "../../alert/actions"
import { minioBrowserPrefix } from "../../constants"
import history from "../../history"

jest.mock("../../web", () => ({
  LoggedIn: jest.fn(() => true).mockReturnValueOnce(false),
  ListObjects: jest.fn(() => {
    return Promise.resolve({
      objects: [{ name: "test1" }, { name: "test2" }],
      istruncated: false,
      nextmarker: "test2",
      writable: false
    })
  LoggedIn: jest
    .fn(() => true)
    .mockReturnValueOnce(true)
    .mockReturnValueOnce(false)
    .mockReturnValueOnce(true)
    .mockReturnValueOnce(true)
    .mockReturnValueOnce(false),
  ListObjects: jest.fn(({ bucketName }) => {
    if (bucketName === "test-deny") {
      return Promise.reject({
        message: "listobjects is denied"
      })
    } else {
      return Promise.resolve({
        objects: [{ name: "test1" }, { name: "test2" }],
        istruncated: false,
        nextmarker: "test2",
        writable: false
      })
    }
  }),
  RemoveObject: jest.fn(({ bucketName, objects }) => {
    if (!bucketName) {
@@ -160,6 +173,41 @@ describe("Objects actions", () => {
    })
  })

  it("creates objects/RESET_LIST after failing to fetch the objects from bucket with ListObjects denied for LoggedIn users", () => {
    const store = mockStore({
      buckets: { currentBucket: "test-deny" },
      objects: { currentPrefix: "" }
    })
    const expectedActions = [
      {
        type: "alert/SET",
        alert: {
          type: "danger",
          message: "listobjects is denied",
          id: alertActions.alertId,
          autoClear: true
        }
      },
      {
        type: "object/RESET_LIST"
      }
    ]
    return store.dispatch(actionsObjects.fetchObjects()).then(() => {
      const actions = store.getActions()
      expect(actions).toEqual(expectedActions)
    })
  })

  it("redirect to login after failing to fetch the objects from bucket for non-LoggedIn users", () => {
    const store = mockStore({
      buckets: { currentBucket: "test-deny" },
      objects: { currentPrefix: "" }
    })
    return store.dispatch(actionsObjects.fetchObjects()).then(() => {
      expect(history.location.pathname.endsWith("/login")).toBeTruthy()
    })
  })

  it("creates objects/SET_SORT_BY and objects/SET_SORT_ORDER when sortObjects is called", () => {
    const store = mockStore({
      objects: {
@@ -244,7 +292,11 @@ describe("Objects actions", () => {
    const expectedActions = [
      {
        type: "alert/SET",
        alert: { type: "danger", message: "Invalid bucket", id: 0 }
        alert: {
          type: "danger",
          message: "Invalid bucket",
          id: alertActions.alertId
        }
      }
    ]
    return store.dispatch(actionsObjects.deleteObject("obj1")).then(() => {
@@ -355,7 +407,7 @@ describe("Objects actions", () => {
    store.dispatch(actionsObjects.downloadObject("obj1"))
    const url = `${
      window.location.origin
    }${minioBrowserPrefix}/download/bk1/${encodeURI("pre1/obj1")}?token=''`
    }${minioBrowserPrefix}/download/bk1/${encodeURI("pre1/obj1")}?token=`
    expect(setLocation).toHaveBeenCalledWith(url)
  })
@@ -16,17 +16,15 @@

import web from "../web"
import history from "../history"
import {
  sortObjectsByName,
  sortObjectsBySize,
  sortObjectsByDate
} from "../utils"
import { sortObjectsByName, sortObjectsBySize, sortObjectsByDate } from "../utils"
import { getCurrentBucket } from "../buckets/selectors"
import { getCurrentPrefix, getCheckedList } from "./selectors"
import * as alertActions from "../alert/actions"
import * as bucketActions from "../buckets/actions"
import { minioBrowserPrefix } from "../constants"

export const SET_LIST = "objects/SET_LIST"
export const RESET_LIST = "object/RESET_LIST"
export const APPEND_LIST = "objects/APPEND_LIST"
export const REMOVE = "objects/REMOVE"
export const SET_SORT_BY = "objects/SET_SORT_BY"
@@ -45,6 +43,10 @@ export const setList = (objects, marker, isTruncated) => ({
  isTruncated
})

export const resetList = () => ({
  type: RESET_LIST
})

export const appendList = (objects, marker, isTruncated) => ({
  type: APPEND_LIST,
  objects,
@@ -54,47 +56,54 @@ export const appendList = (objects, marker, isTruncated) => ({

export const fetchObjects = append => {
  return function(dispatch, getState) {
    const {
      buckets: { currentBucket },
      objects: { currentPrefix, marker }
    } = getState()
    const {buckets: {currentBucket}, objects: {currentPrefix, marker}} = getState()
    if (currentBucket) {
      return web
        .ListObjects({
          bucketName: currentBucket,
          prefix: currentPrefix,
          marker: append ? marker : ""
        })
        .then(res => {
          let objects = []
          if (res.objects) {
            objects = res.objects.map(object => {
              return {
                ...object,
                name: object.name.replace(currentPrefix, "")
              }
            })
          }
          if (append) {
            dispatch(appendList(objects, res.nextmarker, res.istruncated))
          } else {
            dispatch(setList(objects, res.nextmarker, res.istruncated))
            dispatch(setSortBy(""))
            dispatch(setSortOrder(false))
          }
          dispatch(setPrefixWritable(res.writable))
        })
        .catch(err => {
          dispatch(alertActions.set({ type: "danger", message: err.message }))
          history.push("/login")
        })
    }
        .ListObjects({
          bucketName: currentBucket,
          prefix: currentPrefix,
          marker: append ? marker : ""
        })
        .then(res => {
          let objects = []
          if (res.objects) {
            objects = res.objects.map(object => {
              return {
                ...object,
                name: object.name.replace(currentPrefix, "")
              }
            })
          }
          if (append) {
            dispatch(appendList(objects, res.nextmarker, res.istruncated))
          } else {
            dispatch(setList(objects, res.nextmarker, res.istruncated))
            dispatch(setSortBy(""))
            dispatch(setSortOrder(false))
          }
          dispatch(setPrefixWritable(res.writable))
        })
        .catch(err => {
          if (web.LoggedIn()) {
            dispatch(
              alertActions.set({
                type: "danger",
                message: err.message,
                autoClear: true
              })
            )
            dispatch(resetList())
          } else {
            history.push("/login")
          }
        })
    }
  }
}

export const sortObjects = sortBy => {
  return function(dispatch, getState) {
    const { objects } = getState()
    const {objects} = getState()
    const sortOrder = objects.sortBy == sortBy ? !objects.sortOrder : true
    dispatch(setSortBy(sortBy))
    dispatch(setSortOrder(sortOrder))
@@ -194,30 +203,39 @@ export const shareObject = (object, days, hours, minutes) => {
    const currentPrefix = getCurrentPrefix(getState())
    const objectName = `${currentPrefix}${object}`
    const expiry = days * 24 * 60 * 60 + hours * 60 * 60 + minutes * 60
    return web
      .PresignedGet({
        host: location.host,
        bucket: currentBucket,
        object: objectName,
        expiry
      })
      .then(obj => {
        dispatch(showShareObject(object, obj.url))
        dispatch(
          alertActions.set({
            type: "success",
            message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`
          })
        )
      })
      .catch(err => {
        dispatch(
          alertActions.set({
            type: "danger",
            message: err.message
          })
        )
      })
    if (web.LoggedIn()) {
      return web
        .PresignedGet({
          host: location.host,
          bucket: currentBucket,
          object: objectName
        })
        .then(obj => {
          dispatch(showShareObject(object, obj.url))
          dispatch(
            alertActions.set({
              type: "success",
              message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`
            })
          )
        })
        .catch(err => {
          dispatch(
            alertActions.set({
              type: "danger",
              message: err.message
            })
          )
        })
    } else {
      dispatch(showShareObject(object, `${location.host}` + '/' + `${currentBucket}` + '/' + encodeURI(objectName)))
      dispatch(
        alertActions.set({
          type: "success",
          message: `Object shared.`
        })
      )
    }
  }
}

@@ -263,7 +281,7 @@ export const downloadObject = object => {
    } else {
      const url = `${
        window.location.origin
      }${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=''`
      }${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
      window.location = url
    }
  }
@@ -292,7 +310,7 @@ export const downloadCheckedObjects = () => {
      objects: getCheckedList(state)
    }
    if (!web.LoggedIn()) {
      const requestUrl = location.origin + "/minio/zip?token=''"
      const requestUrl = location.origin + "/minio/zip?token="
      downloadZip(requestUrl, req, dispatch)
    } else {
      return web
@@ -303,14 +321,13 @@ export const downloadCheckedObjects = () => {
        }${minioBrowserPrefix}/zip?token=${res.token}`
        downloadZip(requestUrl, req, dispatch)
      })
      .catch(err =>
        dispatch(
          alertActions.set({
            type: "danger",
            message: err.message
          })
        )
      .catch(err => dispatch(
        alertActions.set({
          type: "danger",
          message: err.message
        })
      )
    )
    }
  }
}
@@ -333,8 +350,7 @@ const downloadZip = (url, req, dispatch) => {
  var separator = req.prefix.length > 1 ? "-" : ""

  anchor.href = blobUrl
  anchor.download =
    req.bucketName + separator + req.prefix.slice(0, -1) + ".zip"
  anchor.download = req.bucketName + separator + req.prefix.slice(0, -1) + ".zip"

  anchor.click()
  window.URL.revokeObjectURL(blobUrl)
@@ -50,6 +50,13 @@ export default (
        marker: action.marker,
        isTruncated: action.isTruncated
      }
    case actionsObjects.RESET_LIST:
      return {
        ...state,
        list: [],
        marker: "",
        isTruncated: false
      }
    case actionsObjects.APPEND_LIST:
      return {
        ...state,
@@ -100,10 +100,24 @@ div.fesl-row {
    }
  }

  .fesl-item-name {
    a {
      cursor: default;
    }
  }

  /*--------------------------
      Icons
  ----------------------------*/
  &[data-type=folder] { .list-type(#a1d6dd, '\f114'); }
  &[data-type=folder] {
    .list-type(#a1d6dd, '\f114');

    .fesl-item-name {
      a {
        cursor: pointer;
      }
    }
  }
  &[data-type=pdf] { .list-type(#fa7775, '\f1c1'); }
  &[data-type=zip] { .list-type(#427089, '\f1c6'); }
  &[data-type=audio] { .list-type(#009688, '\f1c7'); }
@@ -70,9 +70,9 @@ async.waterfall([
    commitId = stdout.replace('\n', '')
    if (commitId.length !== 40) throw new Error('commitId invalid : ' + commitId)
    assetsFileName = 'ui-assets.go';
    var cmd = 'go-bindata-assetfs -pkg browser -nocompress=true production/...'
    var cmd = 'go-bindata-assetfs -o bindata_assetfs.go -pkg browser -nocompress=true production/...'
    if (!isProduction) {
      cmd = 'go-bindata-assetfs -pkg browser -nocompress=true dev/...'
      cmd = 'go-bindata-assetfs -o bindata_assetfs.go -pkg browser -nocompress=true dev/...'
    }
    console.log('Running', cmd)
    exec(cmd, cb)
@@ -13,8 +13,7 @@
    "setupTestFrameworkScriptFile": "./app/js/jest/setup.js",
    "testURL": "https://localhost:8080",
    "moduleNameMapper": {
      "\\.(jpg|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga)$":
        "<rootDir>/app/js/jest/__mocks__/fileMock.js",
      "\\.(jpg|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga)$": "<rootDir>/app/js/jest/__mocks__/fileMock.js",
      "\\.(css|scss)$": "identity-obj-proxy"
    }
  },
@@ -40,29 +39,31 @@
    "babel-preset-es2015": "^6.14.0",
    "babel-preset-react": "^6.11.1",
    "babel-register": "^6.14.0",
    "copy-webpack-plugin": "^0.3.3",
    "copy-webpack-plugin": "^4.6.0",
    "css-loader": "^0.23.1",
    "enzyme": "^3.3.0",
    "enzyme-adapter-react-16": "^1.1.1",
    "esformatter": "^0.10.0",
    "esformatter-jsx": "^7.4.1",
    "esformatter-jsx-ignore": "^1.0.6",
    "html-webpack-plugin": "^2.30.1",
    "html-webpack-plugin": "^3.2.0",
    "jest": "^22.1.4",
    "jest-enzyme": "^4.0.2",
    "json-loader": "^0.5.4",
    "less": "^2.7.1",
    "less-loader": "^2.2.3",
    "purifycss-webpack-plugin": "^2.0.3",
    "less": "^3.9.0",
    "less-loader": "^4.1.0",
    "purgecss-webpack-plugin": "^1.4.0",
    "style-loader": "^0.13.1",
    "url-loader": "^0.5.7",
    "webpack-dev-server": "^2.11.1"
    "webpack-cli": "^3.2.0",
    "webpack-dev-server": "^3.1.14"
  },
  "dependencies": {
    "bootstrap": "^3.3.6",
    "classnames": "^2.2.3",
    "expect": "^1.20.2",
    "font-awesome": "^4.7.0",
    "glob-all": "^3.1.0",
    "history": "^4.7.2",
    "humanize": "0.0.9",
    "identity-obj-proxy": "^3.0.0",
@@ -89,6 +90,6 @@
    "reselect": "^3.0.1",
    "superagent": "^3.8.2",
    "superagent-es6-promise": "^1.0.0",
    "webpack": "^3.10.0"
    "webpack": "^4.28.3"
  }
}
File diff suppressed because one or more lines are too long
@@ -16,11 +16,13 @@

var webpack = require('webpack')
var path = require('path')
var glob = require('glob-all')
var CopyWebpackPlugin = require('copy-webpack-plugin')
var purify = require("purifycss-webpack-plugin")
var PurgecssPlugin = require('purgecss-webpack-plugin')

var exports = {
  context: __dirname,
  mode: 'development',
  entry: [
    path.resolve(__dirname, 'app/index.js')
  ],
@@ -99,12 +101,11 @@ var exports = {
      {from: 'app/index.html'}
    ]),
    new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
    new purify({
      basePath: __dirname,
      paths: [
        "app/index.html",
        "app/js/*.js"
      ]
    new PurgecssPlugin({
      paths: glob.sync([
        path.join(__dirname, 'app/index.html'),
        path.join(__dirname, 'app/js/*.js')
      ])
    })
  ]
}
@@ -16,11 +16,13 @@

var webpack = require('webpack')
var path = require('path')
var glob = require('glob-all')
var CopyWebpackPlugin = require('copy-webpack-plugin')
var purify = require("purifycss-webpack-plugin")
var PurgecssPlugin = require('purgecss-webpack-plugin')

var exports = {
  context: __dirname,
  mode: 'production',
  entry: [
    path.resolve(__dirname, 'app/index.js')
  ],
@@ -74,16 +76,12 @@ var exports = {
      {from: 'app/img/logo.svg'},
      {from: 'app/index.html'}
    ]),
    new webpack.DefinePlugin({
      'process.env.NODE_ENV': '"production"'
    }),
    new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
    new purify({
      basePath: __dirname,
      paths: [
        "app/index.html",
        "app/js/*.js"
      ]
    new PurgecssPlugin({
      paths: glob.sync([
        path.join(__dirname, 'app/index.html'),
        path.join(__dirname, 'app/js/*.js')
      ])
    })
  ]
}
browser/yarn.lock (3591 changes): file diff suppressed because it is too large.
@@ -89,11 +89,11 @@ check_minimum_version() {

assert_is_supported_arch() {
    case "${ARCH}" in
        x86_64 | amd64 | aarch64 | arm* )
        x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x )
            return
            ;;
        *)
            echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, arm*]"
            echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x]"
            exit 1
    esac
}
buildscripts/cross-compile.sh (new executable file, 35 lines)
@@ -0,0 +1,35 @@
#!/bin/bash

# Enable tracing if set.
[ -n "$BASH_XTRACEFD" ] && set -ex

function _init() {
    ## All binaries are static make sure to disable CGO.
    export CGO_ENABLED=0

    ## List of architectures and OS to test cross compilation.
    SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64"
}

function _build_and_sign() {
    local osarch=$1
    IFS=/ read -r -a arr <<<"$osarch"
    os="${arr[0]}"
    arch="${arr[1]}"
    package=$(go list -f '{{.ImportPath}}')
    printf -- "--> %15s:%s\n" "${osarch}" "${package}"

    # Go build to build the binary.
    export GOOS=$os
    export GOARCH=$arch
    go build -tags kqueue -o /dev/null
}

function main() {
    echo "Testing builds for OS/Arch: ${SUPPORTED_OSARCH}"
    for each_osarch in ${SUPPORTED_OSARCH}; do
        _build_and_sign "${each_osarch}"
    done
}

_init && main "$@"
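The script is normally driven through the Makefile's `crosscompile` target; a direct invocation sketch, with shell tracing enabled via the BASH_XTRACEFD guard at the top of the script:

```sh
# Via the Makefile target:
make crosscompile
# Or directly, pointing xtrace output at stdout (fd 1) to satisfy the guard:
BASH_XTRACEFD=1 bash buildscripts/cross-compile.sh
```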
buildscripts/gateway-tests.sh (new executable file, 60 lines)
@@ -0,0 +1,60 @@
#!/bin/bash
#
# Minio Cloud Storage, (C) 2019 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

set -e
set -E
set -o pipefail

export SERVER_ENDPOINT=127.0.0.1:24240
export ENABLE_HTTPS=0
export ACCESS_KEY=minio
export SECRET_KEY=minio123
export MINIO_ACCESS_KEY=minio
export MINIO_SECRET_KEY=minio123
export AWS_ACCESS_KEY_ID=minio
export AWS_SECRET_ACCESS_KEY=minio123

trap "cat server.log;cat gateway.log" SIGHUP SIGINT SIGTERM

./minio --quiet --json server data --address 127.0.0.1:24242 > server.log &
sleep 3
./minio --quiet --json gateway s3 http://127.0.0.1:24242 --address 127.0.0.1:24240 > gateway.log &
sleep 3

mkdir -p /mint
git clone https://github.com/minio/mint /mint
cd /mint

export MINT_ROOT_DIR=${MINT_ROOT_DIR:-/mint}
export MINT_RUN_CORE_DIR="$MINT_ROOT_DIR/run/core"
export MINT_RUN_SECURITY_DIR="$MINT_ROOT_DIR/run/security"
export WGET="wget --quiet --no-check-certificate"

go get github.com/go-ini/ini

./create-data-files.sh
./preinstall.sh

# install mint app packages
for pkg in "build"/*/install.sh; do
    echo "Running $pkg"
    $pkg
done

./postinstall.sh

/mint/entrypoint.sh || cat server.log gateway.log fail
@@ -4,7 +4,7 @@ set -e
echo "" > coverage.txt

for d in $(go list ./... | grep -v browser); do
    go test -coverprofile=profile.out -covermode=atomic "$d"
    CGO_ENABLED=0 go test -v -coverprofile=profile.out -covermode=atomic "$d"
    if [ -f profile.out ]; then
        cat profile.out >> coverage.txt
        rm profile.out
@@ -68,9 +68,41 @@ function start_minio_erasure_sets()
    echo "$minio_pid"
}

function start_minio_dist_erasure_sets_ipv6()
{
    declare -a minio_pids
    export MINIO_ACCESS_KEY=$ACCESS_KEY
    export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9000.log" 2>&1 &
minio_pids[0]=$!
"${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9001.log" 2>&1 &
minio_pids[1]=$!
"${MINIO[@]}" server --address="[::1]:9002" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9002.log" 2>&1 &
minio_pids[2]=$!
"${MINIO[@]}" server --address="[::1]:9003" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9003.log" 2>&1 &
minio_pids[3]=$!
"${MINIO[@]}" server --address="[::1]:9004" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9004.log" 2>&1 &
minio_pids[4]=$!
"${MINIO[@]}" server --address="[::1]:9005" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9005.log" 2>&1 &
minio_pids[5]=$!
"${MINIO[@]}" server --address="[::1]:9006" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9006.log" 2>&1 &
minio_pids[6]=$!
"${MINIO[@]}" server --address="[::1]:9007" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9007.log" 2>&1 &
minio_pids[7]=$!
"${MINIO[@]}" server --address="[::1]:9008" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9008.log" 2>&1 &
minio_pids[8]=$!
"${MINIO[@]}" server --address="[::1]:9009" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9009.log" 2>&1 &
minio_pids[9]=$!

sleep 35
echo "${minio_pids[@]}"
}

function start_minio_dist_erasure_sets()
{
declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
minio_pids[0]=$!
"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
@@ -99,6 +131,8 @@ function start_minio_dist_erasure_sets()
function start_minio_dist_erasure()
{
declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
minio_pids[0]=$!
"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
@@ -157,6 +191,34 @@ function run_test_erasure_sets() {
return "$rv"
}

function run_test_dist_erasure_sets_ipv6()
{
minio_pids=( $(start_minio_dist_erasure_sets_ipv6) )

export SERVER_ENDPOINT="[::1]:9000"

(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?

for pid in "${minio_pids[@]}"; do
kill "$pid"
done
sleep 3

if [ "$rv" -ne 0 ]; then
for i in $(seq 0 9); do
echo "server$i log:"
cat "$WORK_DIR/dist-minio-v6-900$i.log"
done
fi

for i in $(seq 0 9); do
rm -f "$WORK_DIR/dist-minio-v6-900$i.log"
done

return "$rv"
}

function run_test_dist_erasure_sets()
{
minio_pids=( $(start_minio_dist_erasure_sets) )
@@ -233,6 +295,7 @@ function run_test_gateway_s3()
{
minio_pid="$(start_minio_gateway_s3)"

export SERVER_ENDPOINT="127.0.0.1:9000"
export ACCESS_KEY=Q3AM3UQ867SPQQA43P2F
export SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
@@ -275,6 +338,7 @@ function __init__()
exit 1
fi

sed -i 's|-sS|-sSg|g' "$FUNCTIONAL_TESTS"
chmod a+x "$FUNCTIONAL_TESTS"
}

@@ -315,6 +379,13 @@ function main()
exit 1
fi

echo "Testing in Distributed Erasure setup as sets with ipv6"
if ! run_test_dist_erasure_sets_ipv6; then
echo "FAILED"
rm -fr "$WORK_DIR"
exit 1
fi

echo "Testing in Gateway S3 setup"
if ! run_test_gateway_s3; then
echo "FAILED"

@@ -21,6 +21,7 @@ import (
"net/http"

"github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/policy"
)

@@ -54,28 +55,30 @@ type accessControlPolicy struct {
// This operation uses the ACL
// subresource to return the ACL of a specified bucket.
func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "GetBucketACL")
ctx := newContext(r, w, "GetBucketACL")

defer logger.AuditLog(w, r, "GetBucketACL", mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}

// Allow getBucketACL if policy action is set, since this is a dummy call
// we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}

// Before proceeding validate if bucket exists.
_, err := objAPI.GetBucketInfo(ctx, bucket)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

@@ -89,7 +92,7 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
Permission: "FULL_CONTROL",
})
if err := xml.NewEncoder(w).Encode(acl); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

@@ -101,7 +104,9 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
// This operation uses the ACL
// subresource to return the ACL of a specified object.
func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "GetObjectACL")
ctx := newContext(r, w, "GetObjectACL")

defer logger.AuditLog(w, r, "GetObjectACL", mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -109,21 +114,21 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.

objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
return
}

// Allow getObjectACL if policy action is set, since this is a dummy call
// we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
}

// Before proceeding validate if object exists.
_, err := objAPI.GetObjectInfo(ctx, bucket, object)
_, err := objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

@@ -137,7 +142,7 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.
Permission: "FULL_CONTROL",
})
if err := xml.NewEncoder(w).Encode(acl); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

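Every handler in this file now threads two extra pieces of state into its error paths: a context created by newContext(r, w, ...) and a guessIsBrowserReq(r) flag passed to writeErrorResponse. For orientation only, here is a minimal sketch of what a helper like guessIsBrowserReq plausibly looks like; the User-Agent check is an assumption based on the helper's name and its call sites in this diff, not the verbatim MinIO implementation:

package cmd

import (
	"net/http"
	"strings"
)

// guessIsBrowserReq (sketch): report whether the request likely came
// from a web browser, so error responses can be rendered accordingly.
// Assumption: browsers are detected via the User-Agent header.
func guessIsBrowserReq(req *http.Request) bool {
	if req == nil {
		return false
	}
	return strings.Contains(req.Header.Get("User-Agent"), "Mozilla")
}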
File diff suppressed because it is too large
@@ -26,8 +26,8 @@ import (
"net/http"
"net/http/httptest"
"net/url"
"os"
"strings"
"sync"
"testing"
"time"

@@ -38,100 +38,194 @@ import (

var (
configJSON = []byte(`{
"version": "13",
"credential": {
"accessKey": "minio",
"secretKey": "minio123"
"version": "33",
"credential": {
"accessKey": "minio",
"secretKey": "minio123"
},
"region": "us-east-1",
"worm": "off",
"storageclass": {
"standard": "",
"rrs": ""
},
"cache": {
"drives": [],
"expiry": 90,
"maxuse": 80,
"exclude": []
},
"kms": {
"vault": {
"endpoint": "",
"auth": {
"type": "",
"approle": {
"id": "",
"secret": ""
}
},
"key-id": {
"name": "",
"version": 0
}
}
},
"notify": {
"amqp": {
"1": {
"enable": false,
"url": "",
"exchange": "",
"routingKey": "",
"exchangeType": "",
"deliveryMode": 0,
"mandatory": false,
"immediate": false,
"durable": false,
"internal": false,
"noWait": false,
"autoDeleted": false
}
},
"elasticsearch": {
"1": {
"enable": false,
"format": "namespace",
"url": "",
"index": ""
}
},
"kafka": {
"1": {
"enable": false,
"brokers": null,
"topic": "",
"tls": {
"enable": false,
"skipVerify": false,
"clientAuth": 0
},
"sasl": {
"enable": false,
"username": "",
"password": ""
}
}
},
"mqtt": {
"1": {
"enable": false,
"broker": "",
"topic": "",
"qos": 0,
"username": "",
"password": "",
"reconnectInterval": 0,
"keepAliveInterval": 0,
"queueDir": ""
}
},
"mysql": {
"1": {
"enable": false,
"format": "namespace",
"dsnString": "",
"table": "",
"host": "",
"port": "",
"user": "",
"password": "",
"database": ""
}
},
"nats": {
"1": {
"enable": false,
"address": "",
"subject": "",
"username": "",
"password": "",
"token": "",
"secure": false,
"pingInterval": 0,
"streaming": {
"enable": false,
"clusterID": "",
"async": false,
"maxPubAcksInflight": 0
}
}
},
"region": "us-west-1",
"logger": {
"console": {
"enable": true,
"level": "fatal"
},
"file": {
"nsq": {
"1": {
"enable": false,
"nsqdAddress": "",
"topic": "",
"tls": {
"enable": false,
"fileName": "",
"level": ""
"skipVerify": false
}
},
"notify": {
"amqp": {
"1": {
"enable": false,
"url": "",
"exchange": "",
"routingKey": "",
"exchangeType": "",
"mandatory": false,
"immediate": false,
"durable": false,
"internal": false,
"noWait": false,
"autoDeleted": false
}
},
"nats": {
"1": {
"enable": false,
"address": "",
"subject": "",
"username": "",
"password": "",
"token": "",
"secure": false,
"pingInterval": 0,
"streaming": {
"enable": false,
"clusterID": "",
"clientID": "",
"async": false,
"maxPubAcksInflight": 0
}
}
},
"elasticsearch": {
"1": {
"enable": false,
"url": "",
"index": ""
}
},
"redis": {
"1": {
"enable": false,
"address": "",
"password": "",
"key": ""
}
},
"postgresql": {
"1": {
"enable": false,
"connectionString": "",
"table": "",
"host": "",
"port": "",
"user": "",
"password": "",
"database": ""
}
},
"kafka": {
"1": {
"enable": false,
"brokers": null,
"topic": ""
}
},
"webhook": {
"1": {
"enable": false,
"endpoint": ""
}
}
}
}`)
}
},
"postgresql": {
"1": {
"enable": false,
"format": "namespace",
"connectionString": "",
"table": "",
"host": "",
"port": "",
"user": "",
"password": "",
"database": ""
}
},
"redis": {
"1": {
"enable": false,
"format": "namespace",
"address": "",
"password": "",
"key": ""
}
},
"webhook": {
"1": {
"enable": false,
"endpoint": ""
}
}
},
"logger": {
"console": {
"enabled": true
},
"http": {
"1": {
"enabled": false,
"endpoint": "https://username:password@example.com/api"
}
}
},
"compress": {
"enabled": false,
"extensions":[".txt",".log",".csv",".json"],
"mime-types":["text/csv","text/plain","application/json"]
},
"openid": {
"jwks": {
"url": ""
}
},
"policy": {
"opa": {
"url": "",
"authToken": ""
}
}
}
`)
)

// adminXLTestBed - encapsulates subsystems that need to be setup for
@@ -149,17 +243,17 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
// reset global variables to start afresh.
resetTestGlobals()

// Initialize minio server config.
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
return nil, err
}
// Initializing objectLayer for HealFormatHandler.
objLayer, xlDirs, xlErr := initTestXLObjLayer()
if xlErr != nil {
return nil, xlErr
}

// Initialize minio server config.
if err := newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
return nil, err
}

// Initialize boot time
globalBootTime = UTCNow()

@@ -176,30 +270,25 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
// Init global heal state
initAllHealState(globalIsXL)

globalNotificationSys, err = NewNotificationSys(globalServerConfig, globalEndpoints)
if err != nil {
return nil, err
}
globalNotificationSys = NewNotificationSys(globalServerConfig, globalEndpoints)

// Create new policy system.
globalPolicySys = NewPolicySys()

// Setup admin mgmt REST API handlers.
adminRouter := mux.NewRouter()
registerAdminRouter(adminRouter)
registerAdminRouter(adminRouter, true, true)

return &adminXLTestBed{
configPath: rootPath,
xlDirs: xlDirs,
objLayer: objLayer,
router: adminRouter,
xlDirs: xlDirs,
objLayer: objLayer,
router: adminRouter,
}, nil
}

// TearDown - method that resets the test bed for subsequent unit
// tests to start afresh.
func (atb *adminXLTestBed) TearDown() {
os.RemoveAll(atb.configPath)
removeRoots(atb.xlDirs)
resetTestGlobals()
}
@@ -219,8 +308,8 @@ func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
for i := 0; i < 10; i++ {
objectName := fmt.Sprintf("%s-%d", objName, i)
_, err = atb.objLayer.PutObject(context.Background(), bucketName, objectName,
mustGetHashReader(t, bytes.NewReader([]byte("hello")),
int64(len("hello")), "", ""), nil)
mustGetPutObjReader(t, bytes.NewReader([]byte("hello")),
int64(len("hello")), "", ""), nil, ObjectOptions{})
if err != nil {
t.Fatalf("Failed to create %s - %v", objectName,
err)
@@ -232,14 +321,14 @@ func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
{
objName := "mpObject"
uploadID, err := atb.objLayer.NewMultipartUpload(context.Background(), bucketName,
objName, nil)
objName, nil, ObjectOptions{})
if err != nil {
t.Fatalf("mp new error: %v", err)
}

_, err = atb.objLayer.PutObjectPart(context.Background(), bucketName, objName,
uploadID, 3, mustGetHashReader(t, bytes.NewReader(
[]byte("hello")), int64(len("hello")), "", ""))
uploadID, 3, mustGetPutObjReader(t, bytes.NewReader(
[]byte("hello")), int64(len("hello")), "", ""), ObjectOptions{})
if err != nil {
t.Fatalf("mp put error: %v", err)
}
@@ -418,6 +507,8 @@ func getServiceCmdRequest(cmd cmdType, cred auth.Credentials, body []byte) (*htt

// Set body
req.Body = ioutil.NopCloser(bytes.NewReader(body))
req.ContentLength = int64(len(body))

// Set sha-sum header
req.Header.Set("X-Amz-Content-Sha256", getSHA256Hash(body))

@@ -442,17 +533,22 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
// single node setup, this degenerates to a simple function
// call under the hood.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))

var wg sync.WaitGroup

// Setting up a go routine to simulate ServerRouter's
// handleServiceSignals for stop and restart commands.
if cmd == restartCmd {
go testServiceSignalReceiver(cmd, t)
wg.Add(1)
go func() {
defer wg.Done()
testServiceSignalReceiver(cmd, t)
}()
}
credentials := globalServerConfig.GetCredential()

body, err := json.Marshal(madmin.ServiceAction{
cmd.toServiceActionValue()})
Action: cmd.toServiceActionValue()})
if err != nil {
t.Fatalf("JSONify error: %v", err)
}
@@ -483,6 +579,9 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
t.Errorf("Expected to receive %d status code but received %d. Body (%s)",
http.StatusOK, rec.Code, string(resp))
}

// Wait until testServiceSignalReceiver() called in a goroutine quits.
wg.Wait()
}

// Test for service status management REST API.
@@ -507,7 +606,6 @@ func TestServiceSetCreds(t *testing.T) {
// single node setup, this degenerates to a simple function
// call under the hood.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))

credentials := globalServerConfig.GetCredential()

@@ -538,8 +636,14 @@ func TestServiceSetCreds(t *testing.T) {
if err != nil {
t.Fatalf("JSONify err: %v", err)
}

ebody, err := madmin.EncryptData(credentials.SecretKey, body)
if err != nil {
t.Fatal(err)
}

// Construct setCreds request
req, err := getServiceCmdRequest(setCreds, credentials, body)
req, err := getServiceCmdRequest(setCreds, credentials, ebody)
if err != nil {
t.Fatalf("Failed to build service status request %v", err)
}
@@ -570,193 +674,6 @@ func TestServiceSetCreds(t *testing.T) {
}
}

// mkLockQueryVal - helper function to build lock query param.
func mkLockQueryVal(bucket, prefix, durationStr string) url.Values {
qVal := url.Values{}
qVal.Set(string(mgmtBucket), bucket)
qVal.Set(string(mgmtPrefix), prefix)
qVal.Set(string(mgmtLockOlderThan), durationStr)
return qVal
}

// Test for locks list management REST API.
func TestListLocksHandler(t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
defer adminTestBed.TearDown()

// Initialize admin peers to make admin RPC calls.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))

testCases := []struct {
bucket string
prefix string
duration string
expectedStatus int
}{
// Test 1 - valid testcase
{
bucket: "mybucket",
prefix: "myobject",
duration: "1s",
expectedStatus: http.StatusOK,
},
// Test 2 - invalid duration
{
bucket: "mybucket",
prefix: "myprefix",
duration: "invalidDuration",
expectedStatus: http.StatusBadRequest,
},
// Test 3 - invalid bucket name
{
bucket: `invalid\\Bucket`,
prefix: "myprefix",
duration: "1h",
expectedStatus: http.StatusBadRequest,
},
// Test 4 - invalid prefix
{
bucket: "mybucket",
prefix: `invalid\\Prefix`,
duration: "1h",
expectedStatus: http.StatusBadRequest,
},
}

for i, test := range testCases {
queryVal := mkLockQueryVal(test.bucket, test.prefix, test.duration)
req, err := newTestRequest("GET", "/minio/admin/v1/locks?"+queryVal.Encode(), 0, nil)
if err != nil {
t.Fatalf("Test %d - Failed to construct list locks request - %v", i+1, err)
}

cred := globalServerConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
t.Fatalf("Test %d - Failed to sign list locks request - %v", i+1, err)
}
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if test.expectedStatus != rec.Code {
t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.expectedStatus, rec.Code)
}
}
}

// Test for locks clear management REST API.
func TestClearLocksHandler(t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
defer adminTestBed.TearDown()

// Initialize admin peers to make admin RPC calls.
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))

testCases := []struct {
bucket string
prefix string
duration string
expectedStatus int
}{
// Test 1 - valid testcase
{
bucket: "mybucket",
prefix: "myobject",
duration: "1s",
expectedStatus: http.StatusOK,
},
// Test 2 - invalid duration
{
bucket: "mybucket",
prefix: "myprefix",
duration: "invalidDuration",
expectedStatus: http.StatusBadRequest,
},
// Test 3 - invalid bucket name
{
bucket: `invalid\\Bucket`,
prefix: "myprefix",
duration: "1h",
expectedStatus: http.StatusBadRequest,
},
// Test 4 - invalid prefix
{
bucket: "mybucket",
prefix: `invalid\\Prefix`,
duration: "1h",
expectedStatus: http.StatusBadRequest,
},
}

for i, test := range testCases {
queryVal := mkLockQueryVal(test.bucket, test.prefix, test.duration)
req, err := newTestRequest("DELETE", "/minio/admin/v1/locks?"+queryVal.Encode(), 0, nil)
if err != nil {
t.Fatalf("Test %d - Failed to construct clear locks request - %v", i+1, err)
}

cred := globalServerConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
t.Fatalf("Test %d - Failed to sign clear locks request - %v", i+1, err)
}
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if test.expectedStatus != rec.Code {
t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.expectedStatus, rec.Code)
}
}
}

// Test for lock query param validation helper function.
func TestValidateLockQueryParams(t *testing.T) {
// reset globals.
// this is to make sure that the tests are not affected by modified globals.
resetTestGlobals()
// initialize NSLock.
initNSLock(false)
// Sample query values for test cases.
allValidVal := mkLockQueryVal("bucket", "prefix", "1s")
invalidBucketVal := mkLockQueryVal(`invalid\\Bucket`, "prefix", "1s")
invalidPrefixVal := mkLockQueryVal("bucket", `invalid\\Prefix`, "1s")
invalidOlderThanVal := mkLockQueryVal("bucket", "prefix", "invalidDuration")

testCases := []struct {
qVals url.Values
apiErr APIErrorCode
}{
{
qVals: invalidBucketVal,
apiErr: ErrInvalidBucketName,
},
{
qVals: invalidPrefixVal,
apiErr: ErrInvalidObjectName,
},
{
qVals: invalidOlderThanVal,
apiErr: ErrInvalidDuration,
},
{
qVals: allValidVal,
apiErr: ErrNone,
},
}

for i, test := range testCases {
_, _, _, apiErr := validateLockQueryParams(test.qVals)
if apiErr != test.apiErr {
t.Errorf("Test %d - Expected error %v but received %v", i+1, test.apiErr, apiErr)
}
}
}

// buildAdminRequest - helper function to build an admin API request.
func buildAdminRequest(queryVal url.Values, method, path string,
contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error) {
@@ -787,7 +704,6 @@ func TestGetConfigHandler(t *testing.T) {

// Initialize admin peers to make admin RPC calls.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))

// Prepare query params for get-config mgmt REST API.
queryVal := url.Values{}
@@ -816,18 +732,19 @@ func TestSetConfigHandler(t *testing.T) {

// Initialize admin peers to make admin RPC calls.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))

// SetConfigHandler restarts minio setup - need to start a
// signal receiver to receive on globalServiceSignalCh.
go testServiceSignalReceiver(restartCmd, t)

// Prepare query params for set-config mgmt REST API.
queryVal := url.Values{}
queryVal.Set("config", "")

password := globalServerConfig.GetCredential().SecretKey
econfigJSON, err := madmin.EncryptData(password, configJSON)
if err != nil {
t.Fatal(err)
}

req, err := buildAdminRequest(queryVal, http.MethodPut, "/config",
int64(len(configJSON)), bytes.NewReader(configJSON))
int64(len(econfigJSON)), bytes.NewReader(econfigJSON))
if err != nil {
t.Fatalf("Failed to construct set-config object request - %v", err)
}
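The hunks above reflect a behavior change in the admin API: the config payload must now be encrypted with the account's secret key via madmin.EncryptData before it is PUT to /config. A minimal sketch of that flow, using only names visible in this diff (buildAdminRequest is the test helper defined in this file; buildEncryptedSetConfigRequest is a hypothetical wrapper introduced here purely for illustration):

package cmd

import (
	"bytes"
	"net/http"
	"net/url"

	"github.com/minio/minio/pkg/madmin"
)

// buildEncryptedSetConfigRequest (sketch): encrypt a config JSON with
// the credential's secret key, then build the set-config request.
func buildEncryptedSetConfigRequest(secretKey string, configJSON []byte) (*http.Request, error) {
	econfigJSON, err := madmin.EncryptData(secretKey, configJSON)
	if err != nil {
		return nil, err
	}
	queryVal := url.Values{}
	queryVal.Set("config", "")
	return buildAdminRequest(queryVal, http.MethodPut, "/config",
		int64(len(econfigJSON)), bytes.NewReader(econfigJSON))
}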
@@ -835,23 +752,13 @@ func TestSetConfigHandler(t *testing.T) {
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
t.Errorf("Expected to succeed but failed with %d", rec.Code)
}

result := setConfigResult{}
err = json.NewDecoder(rec.Body).Decode(&result)
if err != nil {
t.Fatalf("Failed to decode set config result json %v", err)
}

if !result.Status {
t.Error("Expected set-config to succeed, but failed")
t.Errorf("Expected to succeed but failed with %d, body: %s", rec.Code, rec.Body)
}

// Check that a very large config file returns an error.
{
// Make a large enough config string
invalidCfg := []byte(strings.Repeat("A", maxConfigJSONSize+1))
invalidCfg := []byte(strings.Repeat("A", maxEConfigJSONSize+1))
req, err := buildAdminRequest(queryVal, http.MethodPut, "/config",
int64(len(invalidCfg)), bytes.NewReader(invalidCfg))
if err != nil {
@@ -870,7 +777,7 @@ func TestSetConfigHandler(t *testing.T) {
// Check that a config with duplicate keys in an object return
// error.
{
invalidCfg := append(configJSON[:len(configJSON)-1], []byte(`, "version": "15"}`)...)
invalidCfg := append(econfigJSON[:len(econfigJSON)-1], []byte(`, "version": "15"}`)...)
req, err := buildAdminRequest(queryVal, http.MethodPut, "/config",
int64(len(invalidCfg)), bytes.NewReader(invalidCfg))
if err != nil {
@@ -881,7 +788,7 @@ func TestSetConfigHandler(t *testing.T) {
adminTestBed.router.ServeHTTP(rec, req)
respBody := string(rec.Body.Bytes())
if rec.Code != http.StatusBadRequest ||
!strings.Contains(respBody, "JSON configuration provided has objects with duplicate keys") {
!strings.Contains(respBody, "JSON configuration provided is of incorrect format") {
t.Errorf("Got unexpected response code or body %d - %s", rec.Code, respBody)
}
}
@@ -896,7 +803,6 @@ func TestAdminServerInfo(t *testing.T) {

// Initialize admin peers to make admin RPC calls.
globalMinioAddr = "127.0.0.1:9000"
initGlobalAdminPeers(mustGetNewEndpointList("http://127.0.0.1:9000/d1"))

// Prepare query params for set-config mgmt REST API.
queryVal := url.Values{}
@@ -955,12 +861,12 @@ func TestToAdminAPIErr(t *testing.T) {
// 3. Non-admin API specific error.
{
err: errDiskNotFound,
expectedAPIErr: toAPIErrorCode(errDiskNotFound),
expectedAPIErr: toAPIErrorCode(context.Background(), errDiskNotFound),
},
}

for i, test := range testCases {
actualErr := toAdminAPIErrCode(test.err)
actualErr := toAdminAPIErrCode(context.Background(), test.err)
if actualErr != test.expectedAPIErr {
t.Errorf("Test %d: Expected %v but received %v",
i+1, test.expectedAPIErr, actualErr)
@@ -968,81 +874,6 @@ func TestToAdminAPIErr(t *testing.T) {
}
}

func TestWriteSetConfigResponse(t *testing.T) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rootPath)
testCases := []struct {
status bool
errs []error
}{
// 1. all nodes returned success.
{
status: true,
errs: []error{nil, nil, nil, nil},
},
// 2. some nodes returned errors.
{
status: false,
errs: []error{errDiskNotFound, nil, errDiskAccessDenied, errFaultyDisk},
},
}

testPeers := []adminPeer{
{
addr: "localhost:9001",
},
{
addr: "localhost:9002",
},
{
addr: "localhost:9003",
},
{
addr: "localhost:9004",
},
}

testURL, err := url.Parse("http://dummy.com")
if err != nil {
t.Fatalf("Failed to parse a place-holder url")
}

var actualResult setConfigResult
for i, test := range testCases {
rec := httptest.NewRecorder()
writeSetConfigResponse(rec, testPeers, test.errs, test.status, testURL)
resp := rec.Result()
jsonBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatalf("Test %d: Failed to read response %v", i+1, err)
}

err = json.Unmarshal(jsonBytes, &actualResult)
if err != nil {
t.Fatalf("Test %d: Failed to unmarshal json %v", i+1, err)
}
if actualResult.Status != test.status {
t.Errorf("Test %d: Expected status %v but received %v", i+1, test.status, actualResult.Status)
}
for p, res := range actualResult.NodeResults {
if res.Name != testPeers[p].addr {
t.Errorf("Test %d: Expected node name %s but received %s", i+1, testPeers[p].addr, res.Name)
}
expectedErrMsg := fmt.Sprintf("%v", test.errs[p])
if res.ErrMsg != expectedErrMsg {
t.Errorf("Test %d: Expected error %s but received %s", i+1, expectedErrMsg, res.ErrMsg)
}
expectedErrSet := test.errs[p] != nil
if res.ErrSet != expectedErrSet {
t.Errorf("Test %d: Expected ErrSet %v but received %v", i+1, expectedErrSet, res.ErrSet)
}
}
}
}

func mkHealStartReq(t *testing.T, bucket, prefix string,
opts madmin.HealOpts) *http.Request {

@@ -1129,81 +960,3 @@ func collectHealResults(t *testing.T, adminTestBed *adminXLTestBed, bucket,

return res
}

func TestHealStartNStatusHandler(t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
defer adminTestBed.TearDown()

// gen. test data
adminTestBed.GenerateHealTestData(t)
defer adminTestBed.CleanupHealTestData(t)

// Prepare heal-start request to send to the server.
healOpts := madmin.HealOpts{
Recursive: true,
DryRun: false,
}
bucketName, objName := "mybucket", "myobject-0"
var hss madmin.HealStartSuccess

{
req := mkHealStartReq(t, bucketName, objName, healOpts)
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if http.StatusOK != rec.Code {
t.Errorf("Unexpected status code - got %d but expected %d",
rec.Code, http.StatusOK)
}

err = json.Unmarshal(rec.Body.Bytes(), &hss)
if err != nil {
t.Fatal("unable to unmarshal response")
}

if hss.ClientToken == "" {
t.Errorf("unexpected result")
}
}

{
// test with an invalid client token
req := mkHealStatusReq(t, bucketName, objName, hss.ClientToken+hss.ClientToken)
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if rec.Code != http.StatusBadRequest {
t.Errorf("Unexpected status code")
}
}

{
// fetch heal status
results := collectHealResults(t, adminTestBed, bucketName,
objName, hss.ClientToken, 5)

// check if we got back an expected record
foundIt := false
for _, item := range results.Items {
if item.Type == madmin.HealItemObject &&
item.Bucket == bucketName && item.Object == objName {
foundIt = true
}
}
if !foundIt {
t.Error("did not find expected heal record in heal results")
}

// check that the heal settings in the results is the
// same as what we started the heal seq. with.
if results.HealSettings != healOpts {
t.Errorf("unexpected heal settings: %v",
results.HealSettings)
}

if results.Summary == healStoppedStatus {
t.Errorf("heal sequence stopped unexpectedly")
}
}
}

@@ -20,12 +20,15 @@ import (
"context"
"encoding/json"
"fmt"
"net/http"
"runtime"
"strings"
"sync"
"time"

"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/sync/errgroup"
)

// healStatusSummary - overall short summary of a healing sequence
@@ -59,8 +62,8 @@ var (
errHealPushStopNDiscard = fmt.Errorf("heal push stopped due to heal stop signal")
errHealStopSignalled = fmt.Errorf("heal stop signaled")

errFnHealFromAPIErr = func(err error) error {
errCode := toAPIErrorCode(err)
errFnHealFromAPIErr = func(ctx context.Context, err error) error {
errCode := toAPIErrorCode(ctx, err)
apiErr := getAPIError(errCode)
return fmt.Errorf("Heal internal error: %s: %s",
apiErr.Code, apiErr.Description)
@@ -110,6 +113,32 @@ func initAllHealState(isErasureMode bool) {
globalAllHealState = allHealState{
healSeqMap: make(map[string]*healSequence),
}

go globalAllHealState.periodicHealSeqsClean()
}

func (ahs *allHealState) periodicHealSeqsClean() {
// Launch clean-up routine to remove this heal sequence (after
// it ends) from the global state after timeout has elapsed.
ticker := time.NewTicker(time.Minute * 5)
defer ticker.Stop()
for {
select {
case <-ticker.C:
now := UTCNow()
ahs.Lock()
for path, h := range ahs.healSeqMap {
if h.hasEnded() && h.endTime.Add(keepHealSeqStateDuration).Before(now) {
delete(ahs.healSeqMap, path)
}
}
ahs.Unlock()
case <-GlobalServiceDoneCh:
// server could be restarting - need
// to exit immediately
return
}
}
}

// getHealSequence - Retrieve a heal sequence by path. The second
@@ -121,6 +150,35 @@ func (ahs *allHealState) getHealSequence(path string) (h *healSequence, exists b
return h, exists
}

func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIErrorCode) {
var hsp madmin.HealStopSuccess
he, exists := ahs.getHealSequence(path)
if !exists {
hsp = madmin.HealStopSuccess{
ClientToken: "invalid",
StartTime: UTCNow(),
}
} else {
hsp = madmin.HealStopSuccess{
ClientToken: he.clientToken,
ClientAddress: he.clientAddress,
StartTime: he.startTime,
}

he.stop()
for !he.hasEnded() {
time.Sleep(1 * time.Second)
}
ahs.Lock()
defer ahs.Unlock()
// Heal sequence explicitly stopped, remove it.
delete(ahs.healSeqMap, path)
}

b, err := json.Marshal(&hsp)
return b, toAdminAPIErrCode(context.Background(), err)
}

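stopHealSequence returns the JSON-encoded madmin.HealStopSuccess plus an APIErrorCode, leaving transport concerns to the caller. A hedged sketch of how a caller might consume it, using only names that appear in this diff (stopHealByPath is a hypothetical helper added for illustration):

package cmd

import (
	"encoding/json"

	"github.com/minio/minio/pkg/madmin"
)

// stopHealByPath (sketch): stop a heal sequence by its path and decode
// the response payload produced by stopHealSequence above.
func stopHealByPath(bucket, objPrefix string) (madmin.HealStopSuccess, APIErrorCode) {
	var hsp madmin.HealStopSuccess
	b, apiErr := globalAllHealState.stopHealSequence(pathJoin(bucket, objPrefix))
	if apiErr != ErrNone {
		return hsp, apiErr
	}
	// The payload is the JSON marshaling of madmin.HealStopSuccess.
	if err := json.Unmarshal(b, &hsp); err != nil {
		return hsp, ErrInternalError
	}
	return hsp, ErrNone
}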
// LaunchNewHealSequence - launches a background routine that performs
|
||||
// healing according to the healSequence argument. For each heal
|
||||
// sequence, state is stored in the `globalAllHealState`, which is a
|
||||
@@ -141,20 +199,20 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
|
||||
existsAndLive = true
|
||||
}
|
||||
}
|
||||
|
||||
if existsAndLive {
|
||||
// A heal sequence exists on the given path.
|
||||
if h.forceStarted {
|
||||
// stop the running heal sequence - wait for
|
||||
// it to finish.
|
||||
// stop the running heal sequence - wait for it to finish.
|
||||
he.stop()
|
||||
for !he.hasEnded() {
|
||||
time.Sleep(10 * time.Second)
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
} else {
|
||||
errMsg = "Heal is already running on the given path " +
|
||||
"(use force-start option to stop and start afresh). " +
|
||||
fmt.Sprintf("The heal was started by IP %s at %s",
|
||||
h.clientAddress, h.startTime)
|
||||
fmt.Sprintf("The heal was started by IP %s at %s, token is %s",
|
||||
h.clientAddress, h.startTime.Format(http.TimeFormat), h.clientToken)
|
||||
|
||||
return nil, ErrHealAlreadyRunning, errMsg
|
||||
}
|
||||
@@ -181,48 +239,13 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
|
||||
// Launch top-level background heal go-routine
|
||||
go h.healSequenceStart()
|
||||
|
||||
// Launch clean-up routine to remove this heal sequence (after
|
||||
// it ends) from the global state after timeout has elapsed.
|
||||
go func() {
|
||||
var keepStateTimeout <-chan time.Time
|
||||
ticker := time.NewTicker(time.Minute)
|
||||
defer ticker.Stop()
|
||||
everyMinute := ticker.C
|
||||
for {
|
||||
select {
|
||||
// Check every minute if heal sequence has ended.
|
||||
case <-everyMinute:
|
||||
if h.hasEnded() {
|
||||
keepStateTimeout = time.After(keepHealSeqStateDuration)
|
||||
everyMinute = nil
|
||||
}
|
||||
|
||||
// This case does not fire until the heal
|
||||
// sequence completes.
|
||||
case <-keepStateTimeout:
|
||||
// Heal sequence has ended, keep
|
||||
// results state duration has elapsed,
|
||||
// so purge state.
|
||||
ahs.Lock()
|
||||
defer ahs.Unlock()
|
||||
delete(ahs.healSeqMap, h.path)
|
||||
return
|
||||
|
||||
case <-globalServiceDoneCh:
|
||||
// server could be restarting - need
|
||||
// to exit immediately
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
b, err := json.Marshal(madmin.HealStartSuccess{
|
||||
ClientToken: h.clientToken,
|
||||
ClientAddress: h.clientAddress,
|
||||
StartTime: h.startTime,
|
||||
})
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
logger.LogIf(h.ctx, err)
|
||||
return nil, ErrInternalError, ""
|
||||
}
|
||||
return b, ErrNone, ""
|
||||
@@ -270,7 +293,7 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
|
||||
|
||||
jbytes, err := json.Marshal(h.currentStatus)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
logger.LogIf(h.ctx, err)
|
||||
return nil, ErrInternalError
|
||||
}
|
||||
|
||||
@@ -283,12 +306,15 @@ type healSequence struct {
|
||||
// bucket, and prefix on which heal seq. was initiated
|
||||
bucket, objPrefix string
|
||||
|
||||
// path is just bucket + "/" + objPrefix
|
||||
// path is just pathJoin(bucket, objPrefix)
|
||||
path string
|
||||
|
||||
// time at which heal sequence was started
|
||||
startTime time.Time
|
||||
|
||||
// time at which heal sequence has ended
|
||||
endTime time.Time
|
||||
|
||||
// Heal client info
|
||||
clientToken, clientAddress string
|
||||
|
||||
@@ -328,7 +354,7 @@ func newHealSequence(bucket, objPrefix, clientAddr string,
|
||||
return &healSequence{
|
||||
bucket: bucket,
|
||||
objPrefix: objPrefix,
|
||||
path: bucket + "/" + objPrefix,
|
||||
path: pathJoin(bucket, objPrefix),
|
||||
startTime: UTCNow(),
|
||||
clientToken: mustGetUUID(),
|
||||
clientAddress: clientAddr,
|
||||
@@ -383,7 +409,6 @@ func (h *healSequence) stop() {
|
||||
// sequence automatically resumes. The return value indicates if the
|
||||
// operation succeeded.
|
||||
func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
|
||||
|
||||
// start a timer to keep an upper time limit to find an empty
|
||||
// slot to add the given heal result - if no slot is found it
|
||||
// means that the server is holding the maximum amount of
|
||||
@@ -467,6 +492,7 @@ func (h *healSequence) healSequenceStart() {
|
||||
|
||||
select {
|
||||
case err, ok := <-h.traverseAndHealDoneCh:
|
||||
h.endTime = UTCNow()
|
||||
h.currentStatus.updateLock.Lock()
|
||||
defer h.currentStatus.updateLock.Unlock()
|
||||
// Heal traversal is complete.
|
||||
@@ -480,6 +506,7 @@ func (h *healSequence) healSequenceStart() {
|
||||
}
|
||||
|
||||
case <-h.stopSignalCh:
|
||||
h.endTime = UTCNow()
|
||||
h.currentStatus.updateLock.Lock()
|
||||
h.currentStatus.Summary = healStoppedStatus
|
||||
h.currentStatus.FailureDetail = errHealStopSignalled.Error()
|
||||
@@ -519,6 +546,9 @@ func (h *healSequence) traverseAndHeal() {
|
||||
// Start with format healing
|
||||
checkErr(h.healDiskFormat)
|
||||
|
||||
// Start healing the config.
|
||||
checkErr(h.healConfig)
|
||||
|
||||
// Heal buckets and objects
|
||||
checkErr(h.healBuckets)
|
||||
|
||||
@@ -529,9 +559,72 @@ func (h *healSequence) traverseAndHeal() {
|
||||
close(h.traverseAndHealDoneCh)
|
||||
}
|
||||
|
||||
// healConfig - heals config.json, retrun value indicates if a failure occurred.
|
||||
func (h *healSequence) healConfig() error {
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
// NOTE: Healing on configs is run regardless
|
||||
// of any bucket being selected, this is to ensure that
|
||||
// configs are always uptodate and correct.
|
||||
marker := ""
|
||||
isTruncated := true
|
||||
for isTruncated {
|
||||
if globalHTTPServer != nil {
|
||||
// Wait at max 1 minute for an inprogress request
|
||||
// before proceeding to heal
|
||||
waitCount := 60
|
||||
// Any requests in progress, delay the heal.
|
||||
for globalHTTPServer.GetRequestCount() > 2 && waitCount > 0 {
|
||||
waitCount--
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
// Lists all objects under `config` prefix.
|
||||
objectInfos, err := objectAPI.ListObjectsHeal(h.ctx, minioMetaBucket, minioConfigPrefix,
|
||||
marker, "", 1000)
|
||||
if err != nil {
|
||||
return errFnHealFromAPIErr(h.ctx, err)
|
||||
}
|
||||
|
||||
for index := range objectInfos.Objects {
|
||||
if h.isQuitting() {
|
||||
return errHealStopSignalled
|
||||
}
|
||||
o := objectInfos.Objects[index]
|
||||
res, herr := objectAPI.HealObject(h.ctx, o.Bucket, o.Name, h.settings.DryRun, h.settings.Remove)
|
||||
// Object might have been deleted, by the time heal
|
||||
// was attempted we ignore this file an move on.
|
||||
if isErrObjectNotFound(herr) {
|
||||
continue
|
||||
}
|
||||
if herr != nil {
|
||||
return herr
|
||||
}
|
||||
res.Type = madmin.HealItemBucketMetadata
|
||||
if err = h.pushHealResultItem(res); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
isTruncated = objectInfos.IsTruncated
|
||||
marker = objectInfos.NextMarker
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// healDiskFormat - heals format.json, return value indicates if a
|
||||
// failure error occurred.
|
||||
func (h *healSequence) healDiskFormat() error {
|
||||
if h.isQuitting() {
|
||||
return errHealStopSignalled
|
||||
}
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil {
|
||||
@@ -542,13 +635,18 @@ func (h *healSequence) healDiskFormat() error {
|
||||
// return any error, ignore error returned when disks have
|
||||
// already healed.
|
||||
if err != nil && err != errNoHealRequired {
|
||||
return errFnHealFromAPIErr(err)
|
||||
return errFnHealFromAPIErr(h.ctx, err)
|
||||
}
|
||||
|
||||
// Healing succeeded notify the peers to reload format and re-initialize disks.
|
||||
// We will not notify peers only if healing succeeded.
|
||||
if err == nil {
|
||||
peersReInitFormat(globalAdminPeers, h.settings.DryRun)
|
||||
for _, nerr := range globalNotificationSys.ReloadFormat(h.settings.DryRun) {
|
||||
if nerr.Err != nil {
|
||||
logger.GetReqInfo(h.ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(h.ctx, nerr.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Push format heal result
|
||||
@@ -557,6 +655,10 @@ func (h *healSequence) healDiskFormat() error {
|
||||
|
||||
// healBuckets - check for all buckets heal or just particular bucket.
func (h *healSequence) healBuckets() error {
    if h.isQuitting() {
        return errHealStopSignalled
    }

    // 1. If a bucket was specified, heal only the bucket.
    if h.bucket != "" {
        return h.healBucket(h.bucket)
@@ -570,7 +672,7 @@ func (h *healSequence) healBuckets() error {

    buckets, err := objectAPI.ListBucketsHeal(h.ctx)
    if err != nil {
        return errFnHealFromAPIErr(err)
        return errFnHealFromAPIErr(h.ctx, err)
    }

    for _, bucket := range buckets {
@@ -584,17 +686,13 @@ func (h *healSequence) healBuckets() error {

// healBucket - traverses and heals given bucket
func (h *healSequence) healBucket(bucket string) error {
    if h.isQuitting() {
        return errHealStopSignalled
    }

    // Get current object layer instance.
    objectAPI := newObjectLayerFn()
    if objectAPI == nil {
        return errServerNotInitialized
    }

    results, err := objectAPI.HealBucket(h.ctx, bucket, h.settings.DryRun)
    results, err := objectAPI.HealBucket(h.ctx, bucket, h.settings.DryRun, h.settings.Remove)
    // push any available results before checking for error
    for _, result := range results {
        if perr := h.pushHealResultItem(result); perr != nil {
@@ -610,10 +708,9 @@ func (h *healSequence) healBucket(bucket string) error {
    if h.objPrefix != "" {
        // Check if an object named as the objPrefix exists,
        // and if so heal it.
        _, err = objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix)
        _, err = objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix, ObjectOptions{})
        if err == nil {
            err = h.healObject(bucket, h.objPrefix)
            if err != nil {
            if err = h.healObject(bucket, h.objPrefix); err != nil {
                return err
            }
        }
@@ -622,17 +719,40 @@ func (h *healSequence) healBucket(bucket string) error {
        return nil
    }

    entries := runtime.NumCPU()

    marker := ""
    isTruncated := true
    for isTruncated {
        objectInfos, err := objectAPI.ListObjectsHeal(h.ctx, bucket,
            h.objPrefix, marker, "", 1000)
        if err != nil {
            return errFnHealFromAPIErr(err)
        if globalHTTPServer != nil {
            // Wait at max 1 minute for an in-progress request
            // before proceeding to heal.
            waitCount := 60
            // Any requests in progress, delay the heal.
            for globalHTTPServer.GetRequestCount() > 2 && waitCount > 0 {
                waitCount--
                time.Sleep(1 * time.Second)
            }
        }

        for _, o := range objectInfos.Objects {
            if err := h.healObject(o.Bucket, o.Name); err != nil {
        // Heal numCPU * nodes objects at a time.
        objectInfos, err := objectAPI.ListObjectsHeal(h.ctx, bucket,
            h.objPrefix, marker, "", entries)
        if err != nil {
            return errFnHealFromAPIErr(h.ctx, err)
        }

        g := errgroup.WithNErrs(len(objectInfos.Objects))
        for index := range objectInfos.Objects {
            index := index
            g.Go(func() error {
                o := objectInfos.Objects[index]
                return h.healObject(o.Bucket, o.Name)
            }, index)
        }

        for _, err := range g.Wait() {
            if err != nil {
                return err
            }
        }
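The new listing loop fans each page of objects out across goroutines, keeping one error slot per object and failing the sequence on the first non-nil result. A minimal standalone sketch of that pattern, assuming the errgroup API used above (WithNErrs, Go taking an index, Wait returning a []error) lives at minio's pkg/sync/errgroup import path:

package main

import (
    "fmt"

    "github.com/minio/minio/pkg/sync/errgroup"
)

func main() {
    objects := []string{"a.txt", "b.txt", "c.txt"}

    // One error slot per object, mirroring healBucket's per-page fan-out.
    g := errgroup.WithNErrs(len(objects))
    for index := range objects {
        index := index // capture the loop variable for the closure
        g.Go(func() error {
            fmt.Println("healing", objects[index])
            return nil // a real worker would return the heal error here
        }, index)
    }

    // Wait returns one error per index; the caller aborts on the first failure.
    for _, err := range g.Wait() {
        if err != nil {
            fmt.Println("heal failed:", err)
        }
    }
}
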
@@ -655,7 +775,10 @@ func (h *healSequence) healObject(bucket, object string) error {
        return errServerNotInitialized
    }

    hri, err := objectAPI.HealObject(h.ctx, bucket, object, h.settings.DryRun)
    hri, err := objectAPI.HealObject(h.ctx, bucket, object, h.settings.DryRun, h.settings.Remove)
    if isErrObjectNotFound(err) {
        return nil
    }
    if err != nil {
        hri.Detail = err.Error()
    }

@@ -1,5 +1,5 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 * Minio Cloud Storage, (C) 2016, 2017, 2018, 2019 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -31,7 +31,7 @@ type adminAPIHandlers struct {
}

// registerAdminRouter - Add handler functions for each service REST API routes.
func registerAdminRouter(router *mux.Router) {
func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) {

    adminAPI := adminAPIHandlers{}
    // Admin router
@@ -53,26 +53,71 @@ func registerAdminRouter(router *mux.Router) {
    // Info operations
    adminV1Router.Methods(http.MethodGet).Path("/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))

    /// Lock operations
    if globalIsDistXL || globalIsXL {
        /// Heal operations

        // List Locks
        adminV1Router.Methods(http.MethodGet).Path("/locks").HandlerFunc(httpTraceAll(adminAPI.ListLocksHandler))
        // Clear locks
        adminV1Router.Methods(http.MethodDelete).Path("/locks").HandlerFunc(httpTraceAll(adminAPI.ClearLocksHandler))
        // Heal processing endpoint.
        adminV1Router.Methods(http.MethodPost).Path("/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
        adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
        adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))

    /// Heal operations
    /// Health operations

    // Heal processing endpoint.
    adminV1Router.Methods(http.MethodPost).Path("/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
    adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
    adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
    }
    // Performance command - return performance details based on input type
    adminV1Router.Methods(http.MethodGet).Path("/performance").HandlerFunc(httpTraceAll(adminAPI.PerfInfoHandler)).Queries("perfType", "{perfType:.*}")

    // Profiling operations
    adminV1Router.Methods(http.MethodPost).Path("/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
        Queries("profilerType", "{profilerType:.*}")
    adminV1Router.Methods(http.MethodGet).Path("/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))

    /// Config operations
    if enableConfigOps {
        // Update credentials
        adminV1Router.Methods(http.MethodPut).Path("/config/credential").HandlerFunc(httpTraceHdrs(adminAPI.UpdateAdminCredentialsHandler))
        // Get config
        adminV1Router.Methods(http.MethodGet).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
        // Set config
        adminV1Router.Methods(http.MethodPut).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))

        // Update credentials
        adminV1Router.Methods(http.MethodPut).Path("/config/credential").HandlerFunc(httpTraceAll(adminAPI.UpdateCredentialsHandler))
        // Get config
        adminV1Router.Methods(http.MethodGet).Path("/config").HandlerFunc(httpTraceAll(adminAPI.GetConfigHandler))
        // Set config
        adminV1Router.Methods(http.MethodPut).Path("/config").HandlerFunc(httpTraceAll(adminAPI.SetConfigHandler))
        // Get config keys/values
        adminV1Router.Methods(http.MethodGet).Path("/config-keys").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKeysHandler))
        // Set config keys/values
        adminV1Router.Methods(http.MethodPut).Path("/config-keys").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKeysHandler))
    }

    if enableIAMOps {
        // -- IAM APIs --

        // Add policy IAM
        adminV1Router.Methods(http.MethodPut).Path("/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name",
            "{name:.*}")

        // Add user IAM
        adminV1Router.Methods(http.MethodPut).Path("/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
        adminV1Router.Methods(http.MethodPut).Path("/set-user-policy").HandlerFunc(httpTraceHdrs(adminAPI.SetUserPolicy)).
            Queries("accessKey", "{accessKey:.*}").Queries("name", "{name:.*}")
        adminV1Router.Methods(http.MethodPut).Path("/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).
            Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")

        // Remove policy IAM
        adminV1Router.Methods(http.MethodDelete).Path("/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")

        // Remove user IAM
        adminV1Router.Methods(http.MethodDelete).Path("/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")

        // List users
        adminV1Router.Methods(http.MethodGet).Path("/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))

        // List policies
        adminV1Router.Methods(http.MethodGet).Path("/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
    }

    // -- Top APIs --
    // Top locks
    adminV1Router.Methods(http.MethodGet).Path("/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))

    // If none of the routes match, return error.
    adminV1Router.NotFoundHandler = http.HandlerFunc(httpTraceHdrs(notFoundHandlerJSON))
}

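Several of these routes only match when specific query parameters are present, using gorilla/mux's Queries matcher with a pattern variable. A small self-contained sketch of that matching style (the handler body and listen address are illustrative only):

package main

import (
    "fmt"
    "net/http"

    "github.com/gorilla/mux"
)

func main() {
    router := mux.NewRouter()

    // The route fires only for POST /profiling/start?profilerType=...;
    // the query value is exposed through mux.Vars like a path variable.
    router.Methods(http.MethodPost).Path("/profiling/start").
        HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprintln(w, "starting profiler:", mux.Vars(r)["profilerType"])
        }).
        Queries("profilerType", "{profilerType:.*}")

    http.ListenAndServe("localhost:8080", router)
}
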
@@ -1,551 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2014, 2015, 2016, 2017, 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
    "context"
    "crypto/tls"
    "encoding/json"
    "fmt"
    "net"
    "sort"
    "strings"
    "sync"
    "time"

    "github.com/minio/minio/cmd/logger"
    xnet "github.com/minio/minio/pkg/net"
)

var errUnsupportedSignal = fmt.Errorf("unsupported signal: only restart and stop signals are supported")

// AdminRPCClient - admin RPC client talks to admin RPC server.
type AdminRPCClient struct {
    *RPCClient
}

// SignalService - calls SignalService RPC.
func (rpcClient *AdminRPCClient) SignalService(signal serviceSignal) (err error) {
    args := SignalServiceArgs{Sig: signal}
    reply := VoidReply{}

    return rpcClient.Call(adminServiceName+".SignalService", &args, &reply)
}

// ReInitFormat - re-initialize disk format, remotely.
func (rpcClient *AdminRPCClient) ReInitFormat(dryRun bool) error {
    args := ReInitFormatArgs{DryRun: dryRun}
    reply := VoidReply{}

    return rpcClient.Call(adminServiceName+".ReInitFormat", &args, &reply)
}

// ListLocks - Sends list locks command to remote server via RPC.
func (rpcClient *AdminRPCClient) ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
    args := ListLocksQuery{
        Bucket:   bucket,
        Prefix:   prefix,
        Duration: duration,
    }
    var reply []VolumeLockInfo

    err := rpcClient.Call(adminServiceName+".ListLocks", &args, &reply)
    return reply, err
}

// ServerInfo - returns the server info of the server to which the RPC call is made.
func (rpcClient *AdminRPCClient) ServerInfo() (sid ServerInfoData, err error) {
    err = rpcClient.Call(adminServiceName+".ServerInfo", &AuthArgs{}, &sid)
    return sid, err
}

// GetConfig - returns config.json of the remote server.
func (rpcClient *AdminRPCClient) GetConfig() ([]byte, error) {
    args := AuthArgs{}
    var reply []byte

    err := rpcClient.Call(adminServiceName+".GetConfig", &args, &reply)
    return reply, err
}

// WriteTmpConfig - writes config file content to a temporary file on a remote node.
func (rpcClient *AdminRPCClient) WriteTmpConfig(tmpFileName string, configBytes []byte) error {
    args := WriteConfigArgs{
        TmpFileName: tmpFileName,
        Buf:         configBytes,
    }
    reply := VoidReply{}

    err := rpcClient.Call(adminServiceName+".WriteTmpConfig", &args, &reply)
    logger.LogIf(context.Background(), err)
    return err
}

// CommitConfig - Move the new config in tmpFileName onto config.json on a remote node.
func (rpcClient *AdminRPCClient) CommitConfig(tmpFileName string) error {
    args := CommitConfigArgs{FileName: tmpFileName}
    reply := VoidReply{}

    err := rpcClient.Call(adminServiceName+".CommitConfig", &args, &reply)
    logger.LogIf(context.Background(), err)
    return err
}

// NewAdminRPCClient - returns new admin RPC client.
func NewAdminRPCClient(host *xnet.Host) (*AdminRPCClient, error) {
    scheme := "http"
    if globalIsSSL {
        scheme = "https"
    }

    serviceURL := &xnet.URL{
        Scheme: scheme,
        Host:   host.String(),
        Path:   adminServicePath,
    }

    var tlsConfig *tls.Config
    if globalIsSSL {
        tlsConfig = &tls.Config{
            ServerName: host.Name,
            RootCAs:    globalRootCAs,
        }
    }

    rpcClient, err := NewRPCClient(
        RPCClientArgs{
            NewAuthTokenFunc: newAuthToken,
            RPCVersion:       globalRPCAPIVersion,
            ServiceName:      adminServiceName,
            ServiceURL:       serviceURL,
            TLSConfig:        tlsConfig,
        },
    )
    if err != nil {
        return nil, err
    }

    return &AdminRPCClient{rpcClient}, nil
}
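For orientation, a hedged sketch of how this client is used from inside package cmd, mirroring the makeAdminPeers helper below (the peer address is made up and error handling is compressed; this is a fragment, not a standalone program):

// Hypothetical call site inside package cmd.
host, err := xnet.ParseHost("server-2:9000") // illustrative peer address
if err != nil {
    return err
}
client, err := NewAdminRPCClient(host)
if err != nil {
    return err
}
// Query the remote peer over the authenticated RPC channel.
info, err := client.ServerInfo()
if err != nil {
    return err
}
_ = info.Properties.Uptime // e.g. feed into getPeerUptimes-style aggregation
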

// adminCmdRunner - abstracts local and remote execution of admin
// commands like service stop and service restart.
type adminCmdRunner interface {
    SignalService(s serviceSignal) error
    ReInitFormat(dryRun bool) error
    ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error)
    ServerInfo() (ServerInfoData, error)
    GetConfig() ([]byte, error)
    WriteTmpConfig(tmpFileName string, configBytes []byte) error
    CommitConfig(tmpFileName string) error
}

// adminPeer - represents an entity that implements admin API RPCs.
type adminPeer struct {
    addr      string
    cmdRunner adminCmdRunner
    isLocal   bool
}

// type alias for a collection of adminPeer.
type adminPeers []adminPeer

// makeAdminPeers - helper function to construct a collection of adminPeer.
func makeAdminPeers(endpoints EndpointList) (adminPeerList adminPeers) {
    localAddr := GetLocalPeer(endpoints)
    if strings.HasPrefix(localAddr, "127.0.0.1:") {
        // Use first IPv4 instead of loopback address.
        localAddr = net.JoinHostPort(sortIPs(localIP4.ToSlice())[0], globalMinioPort)
    }
    adminPeerList = append(adminPeerList, adminPeer{
        addr:      localAddr,
        cmdRunner: localAdminClient{},
        isLocal:   true,
    })

    for _, hostStr := range GetRemotePeers(endpoints) {
        host, err := xnet.ParseHost(hostStr)
        logger.FatalIf(err, "Unable to parse Admin RPC Host", context.Background())
        rpcClient, err := NewAdminRPCClient(host)
        logger.FatalIf(err, "Unable to initialize Admin RPC Client", context.Background())
        adminPeerList = append(adminPeerList, adminPeer{
            addr:      hostStr,
            cmdRunner: rpcClient,
        })
    }

    return adminPeerList
}

// peersReInitFormat - reinitialize remote object layers to new format.
func peersReInitFormat(peers adminPeers, dryRun bool) error {
    errs := make([]error, len(peers))

    // Send ReInitFormat RPC call to all nodes.
    // for local adminPeer this is a no-op.
    wg := sync.WaitGroup{}
    for i, peer := range peers {
        wg.Add(1)
        go func(idx int, peer adminPeer) {
            defer wg.Done()
            if !peer.isLocal {
                errs[idx] = peer.cmdRunner.ReInitFormat(dryRun)
            }
        }(i, peer)
    }
    wg.Wait()
    return nil
}

// Initialize global adminPeer collection.
func initGlobalAdminPeers(endpoints EndpointList) {
    globalAdminPeers = makeAdminPeers(endpoints)
}

// invokeServiceCmd - Invoke Restart/Stop command.
func invokeServiceCmd(cp adminPeer, cmd serviceSignal) (err error) {
    switch cmd {
    case serviceRestart, serviceStop:
        err = cp.cmdRunner.SignalService(cmd)
    }
    return err
}

// sendServiceCmd - Invoke Restart command on remote peers
// adminPeer followed by on the local peer.
func sendServiceCmd(cps adminPeers, cmd serviceSignal) {
    // Send service command like stop or restart to all remote nodes and finally run on local node.
    errs := make([]error, len(cps))
    var wg sync.WaitGroup
    remotePeers := cps[1:]
    for i := range remotePeers {
        wg.Add(1)
        go func(idx int) {
            defer wg.Done()
            // we use idx+1 because remotePeers slice is 1 position shifted w.r.t. cps
            errs[idx+1] = invokeServiceCmd(remotePeers[idx], cmd)
        }(i)
    }
    wg.Wait()
    errs[0] = invokeServiceCmd(cps[0], cmd)
}

// listPeerLocksInfo - fetch list of locks held on the given bucket,
// matching prefix held longer than duration from all peer servers.
func listPeerLocksInfo(peers adminPeers, bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
    // Used to aggregate volume lock information from all nodes.
    allLocks := make([][]VolumeLockInfo, len(peers))
    errs := make([]error, len(peers))
    var wg sync.WaitGroup
    localPeer := peers[0]
    remotePeers := peers[1:]
    for i, remotePeer := range remotePeers {
        wg.Add(1)
        go func(idx int, remotePeer adminPeer) {
            defer wg.Done()
            // `remotePeers` is right-shifted by one position relative to `peers`
            allLocks[idx], errs[idx] = remotePeer.cmdRunner.ListLocks(bucket, prefix, duration)
        }(i+1, remotePeer)
    }
    wg.Wait()
    allLocks[0], errs[0] = localPeer.cmdRunner.ListLocks(bucket, prefix, duration)

    // Summarizing errors received for ListLocks RPC across all
    // nodes. N.B. the possible unavailability of quorum in errors
    // applies only to distributed setup.
    errCount, err := reduceErrs(errs, []error{})
    if err != nil {
        if errCount >= (len(peers)/2 + 1) {
            return nil, err
        }
        return nil, InsufficientReadQuorum{}
    }

    // Group lock information across nodes by (bucket, object)
    // pair. For readability only.
    paramLockMap := make(map[nsParam][]VolumeLockInfo)
    for _, nodeLocks := range allLocks {
        for _, lockInfo := range nodeLocks {
            param := nsParam{
                volume: lockInfo.Bucket,
                path:   lockInfo.Object,
            }
            paramLockMap[param] = append(paramLockMap[param], lockInfo)
        }
    }
    groupedLockInfos := []VolumeLockInfo{}
    for _, volLocks := range paramLockMap {
        groupedLockInfos = append(groupedLockInfos, volLocks...)
    }
    return groupedLockInfos, nil
}

// uptimeSlice - used to sort uptimes in chronological order.
type uptimeSlice []struct {
    err    error
    uptime time.Duration
}

func (ts uptimeSlice) Len() int {
    return len(ts)
}

func (ts uptimeSlice) Less(i, j int) bool {
    return ts[i].uptime < ts[j].uptime
}

func (ts uptimeSlice) Swap(i, j int) {
    ts[i], ts[j] = ts[j], ts[i]
}

// getPeerUptimes - returns the uptime since the last time read quorum
// was established on success. Otherwise returns errXLReadQuorum.
func getPeerUptimes(peers adminPeers) (time.Duration, error) {
    // In a single node Erasure or FS backend setup the uptime of
    // the setup is the uptime of the single minio server
    // instance.
    if !globalIsDistXL {
        return UTCNow().Sub(globalBootTime), nil
    }

    uptimes := make(uptimeSlice, len(peers))

    // Get up time of all servers.
    wg := sync.WaitGroup{}
    for i, peer := range peers {
        wg.Add(1)
        go func(idx int, peer adminPeer) {
            defer wg.Done()
            serverInfoData, rpcErr := peer.cmdRunner.ServerInfo()
            uptimes[idx].uptime, uptimes[idx].err = serverInfoData.Properties.Uptime, rpcErr
        }(i, peer)
    }
    wg.Wait()

    // Sort uptimes in chronological order.
    sort.Sort(uptimes)

    // Pick the readQuorum'th uptime in chronological order, i.e.,
    // the time at which read quorum was (re-)established.
    readQuorum := len(uptimes) / 2
    validCount := 0
    latestUptime := time.Duration(0)
    for _, uptime := range uptimes {
        if uptime.err != nil {
            logger.LogIf(context.Background(), uptime.err)
            continue
        }

        validCount++
        if validCount >= readQuorum {
            latestUptime = uptime.uptime
            break
        }
    }

    // Fewer than readQuorum "Admin.Uptime" RPC calls returned
    // successfully, so read quorum is unavailable.
    if validCount < readQuorum {
        return time.Duration(0), InsufficientReadQuorum{}
    }

    return latestUptime, nil
}
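The quorum-uptime rule above is easiest to see on concrete numbers: sort the peer uptimes ascending and take the readQuorum'th valid entry, which is the moment at which at least half the peers have been continuously up. A small illustrative sketch (the durations are made up):

package main

import (
    "fmt"
    "sort"
    "time"
)

func main() {
    // Hypothetical uptimes reported by a 4-node setup.
    uptimes := []time.Duration{
        90 * time.Second, 30 * time.Second, 120 * time.Second, 60 * time.Second,
    }
    sort.Slice(uptimes, func(i, j int) bool { return uptimes[i] < uptimes[j] })

    // readQuorum = n/2; the readQuorum'th smallest uptime approximates when
    // read quorum was last (re-)established.
    readQuorum := len(uptimes) / 2
    fmt.Println("read quorum re-established", uptimes[readQuorum-1], "ago") // 1m0s
}
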

// getPeerConfig - Fetches config.json from all nodes in the setup and
// returns the one that occurs in a majority of them.
func getPeerConfig(peers adminPeers) ([]byte, error) {
    if !globalIsDistXL {
        return peers[0].cmdRunner.GetConfig()
    }

    errs := make([]error, len(peers))
    configs := make([][]byte, len(peers))

    // Get config from all servers.
    wg := sync.WaitGroup{}
    for i, peer := range peers {
        wg.Add(1)
        go func(idx int, peer adminPeer) {
            defer wg.Done()
            configs[idx], errs[idx] = peer.cmdRunner.GetConfig()
        }(i, peer)
    }
    wg.Wait()

    // Find the maximally occurring config among peers in a
    // distributed setup.

    serverConfigs := make([]serverConfig, len(peers))
    for i, configBytes := range configs {
        if errs[i] != nil {
            continue
        }

        // Unmarshal the received config files.
        err := json.Unmarshal(configBytes, &serverConfigs[i])
        if err != nil {
            reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", peers[i].addr)
            ctx := logger.SetReqInfo(context.Background(), reqInfo)
            logger.LogIf(ctx, err)
            return nil, err
        }
    }

    configJSON, err := getValidServerConfig(serverConfigs, errs)
    if err != nil {
        logger.LogIf(context.Background(), err)
        return nil, err
    }

    // Return the config.json that was present on quorum or more
    // of the disks.
    return json.Marshal(configJSON)
}

// getValidServerConfig - finds the server config that is present in
// quorum or more number of servers.
func getValidServerConfig(serverConfigs []serverConfig, errs []error) (scv serverConfig, e error) {
    // majority-based quorum
    quorum := len(serverConfigs)/2 + 1

    // Count the number of disks a config.json was found in.
    configCounter := make([]int, len(serverConfigs))

    // We group equal serverConfigs by the lowest index of the
    // same value; e.g., take the following serverConfigs
    // in a 4-node setup,
    // serverConfigs == [c1, c2, c1, c1]
    // configCounter == [3, 1, 0, 0]
    // c1, c2 are the only distinct values that appear. c1 is
    // identified by 0, the lowest index it appears in, and c2 is
    // identified by 1. So, we need to find the number of times
    // each of these distinct values occurs.

    // Invariants:

    // 1. At the beginning of the i-th iteration, the number of
    // unique configurations seen so far is equal to the number of
    // non-zero counter values in configCounter[:i].

    // 2. At the beginning of the i-th iteration, the sum of
    // elements of configCounter[:i] is equal to the number of
    // non-error configurations seen so far.

    // For each of the serverConfigs ...
    for i := range serverConfigs {
        // Skip nodes where getConfig failed.
        if errs[i] != nil {
            continue
        }
        // Check if it is equal to any of the configurations
        // seen so far. If j == i is reached then we have an
        // unseen configuration.
        for j := 0; j <= i; j++ {
            if j < i && configCounter[j] == 0 {
                // serverConfigs[j] is known to be
                // equal to a value that was already
                // seen. See example above for
                // clarity.
                continue
            } else if j < i && serverConfigs[i].ConfigDiff(&serverConfigs[j]) == "" {
                // serverConfigs[i] is equal to
                // serverConfigs[j], update
                // serverConfigs[j]'s counter since it
                // is the lower index.
                configCounter[j]++
                break
            } else if j == i {
                // serverConfigs[i] is equal to no
                // other value seen before. It is
                // unique so far.
                configCounter[i] = 1
                break
            } // else invariants specified above are violated.
        }
    }

    // We find the maximally occurring server config and check if
    // there is quorum.
    var configJSON serverConfig
    maxOccurrence := 0
    for i, count := range configCounter {
        if maxOccurrence < count {
            maxOccurrence = count
            configJSON = serverConfigs[i]
        }
    }

    // If quorum nodes don't agree.
    if maxOccurrence < quorum {
        return scv, errXLWriteQuorum
    }

    return configJSON, nil
}
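The counting scheme is subtle enough to deserve a runnable toy: the sketch below mirrors the loop with plain string equality standing in for ConfigDiff, and reproduces the [c1, c2, c1, c1] -> [3, 1, 0, 0] example from the comment (names here are illustrative, not from the codebase):

package main

import "fmt"

// countConfigs mirrors getValidServerConfig's bookkeeping with strings in
// place of serverConfig values; equal configs are credited to the lowest
// index at which the value first appears.
func countConfigs(configs []string) []int {
    counter := make([]int, len(configs))
    for i := range configs {
        for j := 0; j <= i; j++ {
            if j < i && counter[j] == 0 {
                continue // configs[j] duplicates an earlier value
            } else if j < i && configs[i] == configs[j] {
                counter[j]++ // credit the lowest-index occurrence
                break
            } else if j == i {
                counter[i] = 1 // first time this value is seen
                break
            }
        }
    }
    return counter
}

func main() {
    // Prints [3 1 0 0]; with quorum = 4/2+1 = 3, config "c1" wins.
    fmt.Println(countConfigs([]string{"c1", "c2", "c1", "c1"}))
}
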

// Write config contents into a temporary file on all nodes.
func writeTmpConfigPeers(peers adminPeers, tmpFileName string, configBytes []byte) []error {
    // For a single-node minio server setup.
    if !globalIsDistXL {
        err := peers[0].cmdRunner.WriteTmpConfig(tmpFileName, configBytes)
        return []error{err}
    }

    errs := make([]error, len(peers))

    // Write config into temporary file on all nodes.
    wg := sync.WaitGroup{}
    for i, peer := range peers {
        wg.Add(1)
        go func(idx int, peer adminPeer) {
            defer wg.Done()
            errs[idx] = peer.cmdRunner.WriteTmpConfig(tmpFileName, configBytes)
        }(i, peer)
    }
    wg.Wait()

    // Return errors (if any) seen while writing the
    // temporary config file.
    return errs
}

// Move config contents from the given temporary file onto config.json
// on all nodes.
func commitConfigPeers(peers adminPeers, tmpFileName string) []error {
    // For a single-node minio server setup.
    if !globalIsDistXL {
        return []error{peers[0].cmdRunner.CommitConfig(tmpFileName)}
    }

    errs := make([]error, len(peers))

    // Rename temporary config file into configDir/config.json on
    // all nodes.
    wg := sync.WaitGroup{}
    for i, peer := range peers {
        wg.Add(1)
        go func(idx int, peer adminPeer) {
            defer wg.Done()
            errs[idx] = peer.cmdRunner.CommitConfig(tmpFileName)
        }(i, peer)
    }
    wg.Wait()

    // Return errors (if any) received during rename.
    return errs
}
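writeTmpConfigPeers and commitConfigPeers together form a crude two-phase update: stage the bytes on every node first, and only then rename into place, so a partial write never clobbers a live config.json. A minimal local sketch of the same stage-then-rename idea using only the standard library (paths and contents are illustrative):

package main

import (
    "os"
    "path/filepath"
)

func main() {
    dir := os.TempDir()
    tmpFile := filepath.Join(dir, "config.json.tmp") // staging file
    cfgFile := filepath.Join(dir, "config.json")     // live file

    // Phase 1: stage the new contents; the live file is untouched on failure.
    if err := os.WriteFile(tmpFile, []byte(`{"version":"33"}`), 0o644); err != nil {
        panic(err)
    }
    // Phase 2: rename is atomic on POSIX filesystems, so readers see either
    // the old config or the new one, never a torn write.
    if err := os.Rename(tmpFile, cfgFile); err != nil {
        panic(err)
    }
}
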
@@ -1,650 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2014, 2015, 2016, 2017, 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
    "context"
    "encoding/json"
    "fmt"
    "net"
    "os"
    "path"
    "path/filepath"
    "sort"
    "sync"
    "time"

    "github.com/minio/minio-go/pkg/set"
    "github.com/minio/minio/cmd/logger"
)

const (
    // Admin service names
    signalServiceRPC  = "Admin.SignalService"
    reInitFormatRPC   = "Admin.ReInitFormat"
    listLocksRPC      = "Admin.ListLocks"
    serverInfoDataRPC = "Admin.ServerInfoData"
    getConfigRPC      = "Admin.GetConfig"
    writeTmpConfigRPC = "Admin.WriteTmpConfig"
    commitConfigRPC   = "Admin.CommitConfig"
)

// localAdminClient - represents admin operation to be executed locally.
type localAdminClient struct {
}

// remoteAdminClient - represents admin operation to be executed
// remotely, via RPC.
type remoteAdminClient struct {
    *AuthRPCClient
}

// adminCmdRunner - abstracts local and remote execution of admin
// commands like service stop and service restart.
type adminCmdRunner interface {
    SignalService(s serviceSignal) error
    ReInitFormat(dryRun bool) error
    ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error)
    ServerInfoData() (ServerInfoData, error)
    GetConfig() ([]byte, error)
    WriteTmpConfig(tmpFileName string, configBytes []byte) error
    CommitConfig(tmpFileName string) error
}

var errUnsupportedSignal = fmt.Errorf("unsupported signal: only restart and stop signals are supported")

// SignalService - sends a restart or stop signal to the local server
func (lc localAdminClient) SignalService(s serviceSignal) error {
    switch s {
    case serviceRestart, serviceStop:
        globalServiceSignalCh <- s
    default:
        return errUnsupportedSignal
    }
    return nil
}

// ReInitFormat - re-initialize disk format.
func (lc localAdminClient) ReInitFormat(dryRun bool) error {
    objectAPI := newObjectLayerFn()
    if objectAPI == nil {
        return errServerNotInitialized
    }
    return objectAPI.ReloadFormat(context.Background(), dryRun)
}

// ListLocks - Fetches lock information from local lock instrumentation.
func (lc localAdminClient) ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
    // check if objectLayer is initialized, if not return.
    objectAPI := newObjectLayerFn()
    if objectAPI == nil {
        return nil, errServerNotInitialized
    }
    return objectAPI.ListLocks(context.Background(), bucket, prefix, duration)
}

func (rc remoteAdminClient) SignalService(s serviceSignal) (err error) {
    switch s {
    case serviceRestart, serviceStop:
        reply := AuthRPCReply{}
        err = rc.Call(signalServiceRPC, &SignalServiceArgs{Sig: s},
            &reply)
    default:
        err = errUnsupportedSignal
    }
    return err
}

// ReInitFormat - re-initialize disk format, remotely.
func (rc remoteAdminClient) ReInitFormat(dryRun bool) error {
    reply := AuthRPCReply{}
    return rc.Call(reInitFormatRPC, &ReInitFormatArgs{
        DryRun: dryRun,
    }, &reply)
}

// ListLocks - Sends list locks command to remote server via RPC.
func (rc remoteAdminClient) ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
    listArgs := ListLocksQuery{
        Bucket:   bucket,
        Prefix:   prefix,
        Duration: duration,
    }
    var reply ListLocksReply
    if err := rc.Call(listLocksRPC, &listArgs, &reply); err != nil {
        return nil, err
    }
    return reply.VolLocks, nil
}

// ServerInfoData - Returns the server info of this server.
func (lc localAdminClient) ServerInfoData() (sid ServerInfoData, e error) {
    if globalBootTime.IsZero() {
        return sid, errServerNotInitialized
    }

    // Build storage info
    objLayer := newObjectLayerFn()
    if objLayer == nil {
        return sid, errServerNotInitialized
    }
    storage := objLayer.StorageInfo(context.Background())

    return ServerInfoData{
        StorageInfo: storage,
        ConnStats:   globalConnStats.toServerConnStats(),
        HTTPStats:   globalHTTPStats.toServerHTTPStats(),
        Properties: ServerProperties{
            Uptime:   UTCNow().Sub(globalBootTime),
            Version:  Version,
            CommitID: CommitID,
            SQSARN:   globalNotificationSys.GetARNList(),
            Region:   globalServerConfig.GetRegion(),
        },
    }, nil
}

// ServerInfoData - returns the server info of the server to which the RPC call is made.
func (rc remoteAdminClient) ServerInfoData() (sid ServerInfoData, e error) {
    args := AuthRPCArgs{}
    reply := ServerInfoDataReply{}
    err := rc.Call(serverInfoDataRPC, &args, &reply)
    if err != nil {
        return sid, err
    }

    return reply.ServerInfoData, nil
}

// GetConfig - returns config.json of the local server.
func (lc localAdminClient) GetConfig() ([]byte, error) {
    if globalServerConfig == nil {
        return nil, fmt.Errorf("config not present")
    }

    return json.Marshal(globalServerConfig)
}

// GetConfig - returns config.json of the remote server.
func (rc remoteAdminClient) GetConfig() ([]byte, error) {
    args := AuthRPCArgs{}
    reply := ConfigReply{}
    if err := rc.Call(getConfigRPC, &args, &reply); err != nil {
        return nil, err
    }
    return reply.Config, nil
}

// WriteTmpConfig - writes config file content to a temporary file on
// the local server.
func (lc localAdminClient) WriteTmpConfig(tmpFileName string, configBytes []byte) error {
    return writeTmpConfigCommon(tmpFileName, configBytes)
}

// WriteTmpConfig - writes config file content to a temporary file on
// a remote node.
func (rc remoteAdminClient) WriteTmpConfig(tmpFileName string, configBytes []byte) error {
    wArgs := WriteConfigArgs{
        TmpFileName: tmpFileName,
        Buf:         configBytes,
    }

    err := rc.Call(writeTmpConfigRPC, &wArgs, &WriteConfigReply{})
    if err != nil {
        logger.LogIf(context.Background(), err)
        return err
    }

    return nil
}

// CommitConfig - Move the new config in tmpFileName onto config.json
// on a local node.
func (lc localAdminClient) CommitConfig(tmpFileName string) error {
    configFile := getConfigFile()
    tmpConfigFile := filepath.Join(getConfigDir(), tmpFileName)

    err := os.Rename(tmpConfigFile, configFile)
    reqInfo := (&logger.ReqInfo{}).AppendTags("tmpConfigFile", tmpConfigFile)
    reqInfo.AppendTags("configFile", configFile)
    ctx := logger.SetReqInfo(context.Background(), reqInfo)
    logger.LogIf(ctx, err)
    return err
}

// CommitConfig - Move the new config in tmpFileName onto config.json
// on a remote node.
func (rc remoteAdminClient) CommitConfig(tmpFileName string) error {
    cArgs := CommitConfigArgs{
        FileName: tmpFileName,
    }
    cReply := CommitConfigReply{}
    err := rc.Call(commitConfigRPC, &cArgs, &cReply)
    if err != nil {
        logger.LogIf(context.Background(), err)
        return err
    }

    return nil
}

// adminPeer - represents an entity that implements admin API RPCs.
type adminPeer struct {
    addr      string
    cmdRunner adminCmdRunner
    isLocal   bool
}

// type alias for a collection of adminPeer.
type adminPeers []adminPeer

// makeAdminPeers - helper function to construct a collection of adminPeer.
func makeAdminPeers(endpoints EndpointList) (adminPeerList adminPeers) {
    thisPeer := globalMinioAddr
    if globalMinioHost == "" {
        // When host is not explicitly provided simply
        // use the first IPv4.
        thisPeer = net.JoinHostPort(sortIPs(localIP4.ToSlice())[0], globalMinioPort)
    }
    adminPeerList = append(adminPeerList, adminPeer{
        thisPeer,
        localAdminClient{},
        true,
    })

    hostSet := set.CreateStringSet(globalMinioAddr)
    cred := globalServerConfig.GetCredential()
    serviceEndpoint := path.Join(minioReservedBucketPath, adminPath)
    for _, host := range GetRemotePeers(endpoints) {
        if hostSet.Contains(host) {
            continue
        }
        hostSet.Add(host)
        adminPeerList = append(adminPeerList, adminPeer{
            addr: host,
            cmdRunner: &remoteAdminClient{newAuthRPCClient(authConfig{
                accessKey:       cred.AccessKey,
                secretKey:       cred.SecretKey,
                serverAddr:      host,
                serviceEndpoint: serviceEndpoint,
                secureConn:      globalIsSSL,
                serviceName:     "Admin",
            })},
        })
    }

    return adminPeerList
}

// peersReInitFormat - reinitialize remote object layers to new format.
func peersReInitFormat(peers adminPeers, dryRun bool) error {
    errs := make([]error, len(peers))

    // Send ReInitFormat RPC call to all nodes.
    // for local adminPeer this is a no-op.
    wg := sync.WaitGroup{}
    for i, peer := range peers {
        wg.Add(1)
        go func(idx int, peer adminPeer) {
            defer wg.Done()
            if !peer.isLocal {
                errs[idx] = peer.cmdRunner.ReInitFormat(dryRun)
            }
        }(i, peer)
    }
    wg.Wait()
    return nil
}

// Initialize global adminPeer collection.
func initGlobalAdminPeers(endpoints EndpointList) {
    globalAdminPeers = makeAdminPeers(endpoints)
}

// invokeServiceCmd - Invoke Restart/Stop command.
func invokeServiceCmd(cp adminPeer, cmd serviceSignal) (err error) {
    switch cmd {
    case serviceRestart, serviceStop:
        err = cp.cmdRunner.SignalService(cmd)
    }
    return err
}

// sendServiceCmd - Invoke Restart command on remote peers
// adminPeer followed by on the local peer.
func sendServiceCmd(cps adminPeers, cmd serviceSignal) {
    // Send service command like stop or restart to all remote nodes and finally run on local node.
    errs := make([]error, len(cps))
    var wg sync.WaitGroup
    remotePeers := cps[1:]
    for i := range remotePeers {
        wg.Add(1)
        go func(idx int) {
            defer wg.Done()
            // we use idx+1 because remotePeers slice is 1 position shifted w.r.t. cps
            errs[idx+1] = invokeServiceCmd(remotePeers[idx], cmd)
        }(i)
    }
    wg.Wait()
    errs[0] = invokeServiceCmd(cps[0], cmd)
}

// listPeerLocksInfo - fetch list of locks held on the given bucket,
// matching prefix held longer than duration from all peer servers.
func listPeerLocksInfo(peers adminPeers, bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
    // Used to aggregate volume lock information from all nodes.
    allLocks := make([][]VolumeLockInfo, len(peers))
    errs := make([]error, len(peers))
    var wg sync.WaitGroup
    localPeer := peers[0]
    remotePeers := peers[1:]
    for i, remotePeer := range remotePeers {
        wg.Add(1)
        go func(idx int, remotePeer adminPeer) {
            defer wg.Done()
            // `remotePeers` is right-shifted by one position relative to `peers`
            allLocks[idx], errs[idx] = remotePeer.cmdRunner.ListLocks(bucket, prefix, duration)
        }(i+1, remotePeer)
    }
    wg.Wait()
    allLocks[0], errs[0] = localPeer.cmdRunner.ListLocks(bucket, prefix, duration)

    // Summarizing errors received for ListLocks RPC across all
    // nodes. N.B. the possible unavailability of quorum in errors
    // applies only to distributed setup.
    errCount, err := reduceErrs(errs, []error{})
    if err != nil {
        if errCount >= (len(peers)/2 + 1) {
            return nil, err
        }
        return nil, InsufficientReadQuorum{}
    }

    // Group lock information across nodes by (bucket, object)
    // pair. For readability only.
    paramLockMap := make(map[nsParam][]VolumeLockInfo)
    for _, nodeLocks := range allLocks {
        for _, lockInfo := range nodeLocks {
            param := nsParam{
                volume: lockInfo.Bucket,
                path:   lockInfo.Object,
            }
            paramLockMap[param] = append(paramLockMap[param], lockInfo)
        }
    }
    groupedLockInfos := []VolumeLockInfo{}
    for _, volLocks := range paramLockMap {
        groupedLockInfos = append(groupedLockInfos, volLocks...)
    }
    return groupedLockInfos, nil
}

// uptimeSlice - used to sort uptimes in chronological order.
type uptimeSlice []struct {
    err    error
    uptime time.Duration
}

func (ts uptimeSlice) Len() int {
    return len(ts)
}

func (ts uptimeSlice) Less(i, j int) bool {
    return ts[i].uptime < ts[j].uptime
}

func (ts uptimeSlice) Swap(i, j int) {
    ts[i], ts[j] = ts[j], ts[i]
}

// getPeerUptimes - returns the uptime since the last time read quorum
// was established on success. Otherwise returns errXLReadQuorum.
func getPeerUptimes(peers adminPeers) (time.Duration, error) {
    // In a single node Erasure or FS backend setup the uptime of
    // the setup is the uptime of the single minio server
    // instance.
    if !globalIsDistXL {
        return UTCNow().Sub(globalBootTime), nil
    }

    uptimes := make(uptimeSlice, len(peers))

    // Get up time of all servers.
    wg := sync.WaitGroup{}
    for i, peer := range peers {
        wg.Add(1)
        go func(idx int, peer adminPeer) {
            defer wg.Done()
            serverInfoData, rpcErr := peer.cmdRunner.ServerInfoData()
            uptimes[idx].uptime, uptimes[idx].err = serverInfoData.Properties.Uptime, rpcErr
        }(i, peer)
    }
    wg.Wait()

    // Sort uptimes in chronological order.
    sort.Sort(uptimes)

    // Pick the readQuorum'th uptime in chronological order, i.e.,
    // the time at which read quorum was (re-)established.
    readQuorum := len(uptimes) / 2
    validCount := 0
    latestUptime := time.Duration(0)
    for _, uptime := range uptimes {
        if uptime.err != nil {
            logger.LogIf(context.Background(), uptime.err)
            continue
        }

        validCount++
        if validCount >= readQuorum {
            latestUptime = uptime.uptime
            break
        }
    }

    // Fewer than readQuorum "Admin.Uptime" RPC calls returned
    // successfully, so read quorum is unavailable.
    if validCount < readQuorum {
        return time.Duration(0), InsufficientReadQuorum{}
    }

    return latestUptime, nil
}

// getPeerConfig - Fetches config.json from all nodes in the setup and
// returns the one that occurs in a majority of them.
func getPeerConfig(peers adminPeers) ([]byte, error) {
    if !globalIsDistXL {
        return peers[0].cmdRunner.GetConfig()
    }

    errs := make([]error, len(peers))
    configs := make([][]byte, len(peers))

    // Get config from all servers.
    wg := sync.WaitGroup{}
    for i, peer := range peers {
        wg.Add(1)
        go func(idx int, peer adminPeer) {
            defer wg.Done()
            configs[idx], errs[idx] = peer.cmdRunner.GetConfig()
        }(i, peer)
    }
    wg.Wait()

    // Find the maximally occurring config among peers in a
    // distributed setup.

    serverConfigs := make([]serverConfig, len(peers))
    for i, configBytes := range configs {
        if errs[i] != nil {
            continue
        }

        // Unmarshal the received config files.
        err := json.Unmarshal(configBytes, &serverConfigs[i])
        if err != nil {
            reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", peers[i].addr)
            ctx := logger.SetReqInfo(context.Background(), reqInfo)
            logger.LogIf(ctx, err)
            return nil, err
        }
    }

    configJSON, err := getValidServerConfig(serverConfigs, errs)
    if err != nil {
        logger.LogIf(context.Background(), err)
        return nil, err
    }

    // Return the config.json that was present on quorum or more
    // of the disks.
    return json.Marshal(configJSON)
}

// getValidServerConfig - finds the server config that is present in
// quorum or more number of servers.
func getValidServerConfig(serverConfigs []serverConfig, errs []error) (scv serverConfig, e error) {
    // majority-based quorum
    quorum := len(serverConfigs)/2 + 1

    // Count the number of disks a config.json was found in.
    configCounter := make([]int, len(serverConfigs))

    // We group equal serverConfigs by the lowest index of the
    // same value; e.g., take the following serverConfigs
    // in a 4-node setup,
    // serverConfigs == [c1, c2, c1, c1]
    // configCounter == [3, 1, 0, 0]
    // c1, c2 are the only distinct values that appear. c1 is
    // identified by 0, the lowest index it appears in, and c2 is
    // identified by 1. So, we need to find the number of times
    // each of these distinct values occurs.

    // Invariants:

    // 1. At the beginning of the i-th iteration, the number of
    // unique configurations seen so far is equal to the number of
    // non-zero counter values in configCounter[:i].

    // 2. At the beginning of the i-th iteration, the sum of
    // elements of configCounter[:i] is equal to the number of
    // non-error configurations seen so far.

    // For each of the serverConfigs ...
    for i := range serverConfigs {
        // Skip nodes where getConfig failed.
        if errs[i] != nil {
            continue
        }
        // Check if it is equal to any of the configurations
        // seen so far. If j == i is reached then we have an
        // unseen configuration.
        for j := 0; j <= i; j++ {
            if j < i && configCounter[j] == 0 {
                // serverConfigs[j] is known to be
                // equal to a value that was already
                // seen. See example above for
                // clarity.
                continue
            } else if j < i && serverConfigs[i].ConfigDiff(&serverConfigs[j]) == "" {
                // serverConfigs[i] is equal to
                // serverConfigs[j], update
                // serverConfigs[j]'s counter since it
                // is the lower index.
                configCounter[j]++
                break
            } else if j == i {
                // serverConfigs[i] is equal to no
                // other value seen before. It is
                // unique so far.
                configCounter[i] = 1
                break
            } // else invariants specified above are violated.
        }
    }

    // We find the maximally occurring server config and check if
    // there is quorum.
    var configJSON serverConfig
    maxOccurrence := 0
    for i, count := range configCounter {
        if maxOccurrence < count {
            maxOccurrence = count
            configJSON = serverConfigs[i]
        }
    }

    // If quorum nodes don't agree.
    if maxOccurrence < quorum {
        return scv, errXLWriteQuorum
    }

    return configJSON, nil
}

// Write config contents into a temporary file on all nodes.
func writeTmpConfigPeers(peers adminPeers, tmpFileName string, configBytes []byte) []error {
    // For a single-node minio server setup.
    if !globalIsDistXL {
        err := peers[0].cmdRunner.WriteTmpConfig(tmpFileName, configBytes)
        return []error{err}
    }

    errs := make([]error, len(peers))

    // Write config into temporary file on all nodes.
    wg := sync.WaitGroup{}
    for i, peer := range peers {
        wg.Add(1)
        go func(idx int, peer adminPeer) {
            defer wg.Done()
            errs[idx] = peer.cmdRunner.WriteTmpConfig(tmpFileName, configBytes)
        }(i, peer)
    }
    wg.Wait()

    // Return errors (if any) seen while writing the
    // temporary config file.
    return errs
}

// Move config contents from the given temporary file onto config.json
// on all nodes.
func commitConfigPeers(peers adminPeers, tmpFileName string) []error {
    // For a single-node minio server setup.
    if !globalIsDistXL {
        return []error{peers[0].cmdRunner.CommitConfig(tmpFileName)}
    }

    errs := make([]error, len(peers))

    // Rename temporary config file into configDir/config.json on
    // all nodes.
    wg := sync.WaitGroup{}
    for i, peer := range peers {
        wg.Add(1)
        go func(idx int, peer adminPeer) {
            defer wg.Done()
            errs[idx] = peer.cmdRunner.CommitConfig(tmpFileName)
        }(i, peer)
    }
    wg.Wait()

    // Return errors (if any) received during rename.
    return errs
}
@@ -1,127 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
    "context"
    "path"
    "time"

    "github.com/gorilla/mux"
    "github.com/minio/minio/cmd/logger"
    xrpc "github.com/minio/minio/cmd/rpc"
)

const adminServiceName = "Admin"
const adminServiceSubPath = "/admin"

var adminServicePath = path.Join(minioReservedBucketPath, adminServiceSubPath)

// adminRPCReceiver - Admin RPC receiver for admin RPC server.
type adminRPCReceiver struct {
    local *localAdminClient
}

// SignalServiceArgs - provides the signal argument to SignalService RPC
type SignalServiceArgs struct {
    AuthArgs
    Sig serviceSignal
}

// SignalService - Send a restart or stop signal to the service
func (receiver *adminRPCReceiver) SignalService(args *SignalServiceArgs, reply *VoidReply) error {
    return receiver.local.SignalService(args.Sig)
}

// ListLocksQuery - wraps ListLocks API's query values to send over RPC.
type ListLocksQuery struct {
    AuthArgs
    Bucket   string
    Prefix   string
    Duration time.Duration
}

// ListLocks - lists locks held by requests handled by this server instance.
func (receiver *adminRPCReceiver) ListLocks(args *ListLocksQuery, reply *[]VolumeLockInfo) (err error) {
    *reply, err = receiver.local.ListLocks(args.Bucket, args.Prefix, args.Duration)
    return err
}

// ServerInfo - returns the server info when object layer was initialized on this server.
func (receiver *adminRPCReceiver) ServerInfo(args *AuthArgs, reply *ServerInfoData) (err error) {
    *reply, err = receiver.local.ServerInfo()
    return err
}

// GetConfig - returns the config.json of this server.
func (receiver *adminRPCReceiver) GetConfig(args *AuthArgs, reply *[]byte) (err error) {
    *reply, err = receiver.local.GetConfig()
    return err
}

// ReInitFormatArgs - provides dry-run information to re-initialize format.json
type ReInitFormatArgs struct {
    AuthArgs
    DryRun bool
}

// ReInitFormat - re-init 'format.json'
func (receiver *adminRPCReceiver) ReInitFormat(args *ReInitFormatArgs, reply *VoidReply) error {
    return receiver.local.ReInitFormat(args.DryRun)
}

// WriteConfigArgs - wraps the bytes to be written and temporary file name.
type WriteConfigArgs struct {
    AuthArgs
    TmpFileName string
    Buf         []byte
}

// WriteTmpConfig - writes the supplied config contents onto the
// supplied temporary file.
func (receiver *adminRPCReceiver) WriteTmpConfig(args *WriteConfigArgs, reply *VoidReply) error {
    return receiver.local.WriteTmpConfig(args.TmpFileName, args.Buf)
}

// CommitConfigArgs - wraps the config file name that needs to be
// committed into config.json on this node.
type CommitConfigArgs struct {
    AuthArgs
    FileName string
}

// CommitConfig - Renames the temporary file into config.json on this node.
func (receiver *adminRPCReceiver) CommitConfig(args *CommitConfigArgs, reply *VoidReply) error {
    return receiver.local.CommitConfig(args.FileName)
}

// NewAdminRPCServer - returns new admin RPC server.
func NewAdminRPCServer() (*xrpc.Server, error) {
    rpcServer := xrpc.NewServer()
    if err := rpcServer.RegisterName(adminServiceName, &adminRPCReceiver{&localAdminClient{}}); err != nil {
        return nil, err
    }
    return rpcServer, nil
}

// registerAdminRPCRouter - creates and registers Admin RPC server and its router.
func registerAdminRPCRouter(router *mux.Router) {
    rpcServer, err := NewAdminRPCServer()
    logger.FatalIf(err, "Unable to initialize Admin RPC Server", context.Background())
    subrouter := router.PathPrefix(minioReservedBucketPath).Subrouter()
    subrouter.Path(adminServiceSubPath).Handler(rpcServer)
}
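The receiver above follows the classic Go RPC shape — exported methods taking an args pointer and a reply pointer and returning error — which minio's internal xrpc package shares with the standard library. A standalone sketch of the same pattern using only net/rpc (all names here are illustrative):

package main

import (
    "fmt"
    "net"
    "net/rpc"
)

// EchoArgs plays the role of SignalServiceArgs: an exported args struct.
type EchoArgs struct{ Msg string }

// EchoReceiver plays the role of adminRPCReceiver.
type EchoReceiver struct{}

// Echo has the required method shape: (args *T1, reply *T2) error.
func (r *EchoReceiver) Echo(args *EchoArgs, reply *string) error {
    *reply = args.Msg
    return nil
}

func main() {
    srv := rpc.NewServer()
    // RegisterName mirrors rpcServer.RegisterName(adminServiceName, ...) above.
    if err := srv.RegisterName("Echo", &EchoReceiver{}); err != nil {
        panic(err)
    }
    ln, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        panic(err)
    }
    fmt.Println("serving on", ln.Addr())
    srv.Accept(ln) // blocks, serving "Echo.Echo" calls
}
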
@@ -1,613 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"encoding/json"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"reflect"
	"testing"
	"time"

	xnet "github.com/minio/minio/pkg/net"
)

///////////////////////////////////////////////////////////////////////////////
//
// localAdminClient and AdminRPCClient are adminCmdRunner interface compatible,
// hence the test functions below apply to both clients.
//
///////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////////////////////////////
//
// Admin RPC server, adminRPCReceiver and AdminRPCClient are
// inter-dependent; the test functions below are sufficient to test all of them.
//
///////////////////////////////////////////////////////////////////////////////

func testAdminCmdRunnerSignalService(t *testing.T, client adminCmdRunner) {
	tmpGlobalServiceSignalCh := globalServiceSignalCh
	globalServiceSignalCh = make(chan serviceSignal, 10)
	defer func() {
		globalServiceSignalCh = tmpGlobalServiceSignalCh
	}()

	testCases := []struct {
		signal    serviceSignal
		expectErr bool
	}{
		{serviceRestart, false},
		{serviceStop, false},
		{serviceStatus, true},
		{serviceSignal(100), true},
	}

	for i, testCase := range testCases {
		err := client.SignalService(testCase.signal)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}

func testAdminCmdRunnerReInitFormat(t *testing.T, client adminCmdRunner) {
	tmpGlobalObjectAPI := globalObjectAPI
	defer func() {
		globalObjectAPI = tmpGlobalObjectAPI
	}()

	testCases := []struct {
		objectAPI ObjectLayer
		dryRun    bool
		expectErr bool
	}{
		{&DummyObjectLayer{}, true, false},
		{&DummyObjectLayer{}, false, false},
		{nil, true, true},
		{nil, false, true},
	}

	for i, testCase := range testCases {
		globalObjectAPI = testCase.objectAPI
		err := client.ReInitFormat(testCase.dryRun)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}

func testAdminCmdRunnerListLocks(t *testing.T, client adminCmdRunner) {
	tmpGlobalObjectAPI := globalObjectAPI
	defer func() {
		globalObjectAPI = tmpGlobalObjectAPI
	}()

	testCases := []struct {
		objectAPI ObjectLayer
		expectErr bool
	}{
		{&DummyObjectLayer{}, false},
		{nil, true},
	}

	for i, testCase := range testCases {
		globalObjectAPI = testCase.objectAPI
		_, err := client.ListLocks("", "", time.Duration(0))
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}

func testAdminCmdRunnerServerInfo(t *testing.T, client adminCmdRunner) {
	tmpGlobalBootTime := globalBootTime
	tmpGlobalObjectAPI := globalObjectAPI
	tmpGlobalConnStats := globalConnStats
	tmpGlobalHTTPStats := globalHTTPStats
	tmpGlobalNotificationSys := globalNotificationSys
	defer func() {
		globalBootTime = tmpGlobalBootTime
		globalObjectAPI = tmpGlobalObjectAPI
		globalConnStats = tmpGlobalConnStats
		globalHTTPStats = tmpGlobalHTTPStats
		globalNotificationSys = tmpGlobalNotificationSys
	}()

	endpoints := new(EndpointList)
	notificationSys, err := NewNotificationSys(globalServerConfig, *endpoints)
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}

	testCases := []struct {
		bootTime        time.Time
		objectAPI       ObjectLayer
		connStats       *ConnStats
		httpStats       *HTTPStats
		notificationSys *NotificationSys
		expectErr       bool
	}{
		{UTCNow(), &DummyObjectLayer{}, newConnStats(), newHTTPStats(), notificationSys, false},
		{time.Time{}, nil, nil, nil, nil, true},
		{UTCNow(), nil, nil, nil, nil, true},
	}

	for i, testCase := range testCases {
		globalBootTime = testCase.bootTime
		globalObjectAPI = testCase.objectAPI
		globalConnStats = testCase.connStats
		globalHTTPStats = testCase.httpStats
		globalNotificationSys = testCase.notificationSys
		_, err := client.ServerInfo()
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}

func testAdminCmdRunnerGetConfig(t *testing.T, client adminCmdRunner) {
	tmpGlobalServerConfig := globalServerConfig
	defer func() {
		globalServerConfig = tmpGlobalServerConfig
	}()

	config := newServerConfig()

	testCases := []struct {
		config    *serverConfig
		expectErr bool
	}{
		{globalServerConfig, false},
		{config, false},
	}

	for i, testCase := range testCases {
		globalServerConfig = testCase.config
		_, err := client.GetConfig()
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}

func testAdminCmdRunnerWriteTmpConfig(t *testing.T, client adminCmdRunner) {
	tmpConfigDir := configDir
	defer func() {
		configDir = tmpConfigDir
	}()

	tempDir, err := ioutil.TempDir("", ".AdminCmdRunnerWriteTmpConfig.")
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	defer os.RemoveAll(tempDir)
	configDir = &ConfigDir{dir: tempDir}

	testCases := []struct {
		tmpFilename string
		configBytes []byte
		expectErr   bool
	}{
		{"config1.json", []byte(`{"version":"23","region":"us-west-1a"}`), false},
		// Overwrite test.
		{"config1.json", []byte(`{"version":"23","region":"us-west-1a","browser":"on"}`), false},
		{"config2.json", []byte{}, false},
		{"config3.json", nil, false},
	}

	for i, testCase := range testCases {
		err := client.WriteTmpConfig(testCase.tmpFilename, testCase.configBytes)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}

func testAdminCmdRunnerCommitConfig(t *testing.T, client adminCmdRunner) {
	tmpConfigDir := configDir
	defer func() {
		configDir = tmpConfigDir
	}()

	tempDir, err := ioutil.TempDir("", ".AdminCmdRunnerCommitConfig.")
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	defer os.RemoveAll(tempDir)
	configDir = &ConfigDir{dir: tempDir}
	err = ioutil.WriteFile(filepath.Join(tempDir, "config.json"), []byte{}, os.ModePerm)
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}

	err = client.WriteTmpConfig("config1.json", []byte(`{"version":"23","region":"us-west-1a"}`))
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}

	testCases := []struct {
		tmpFilename string
		expectErr   bool
	}{
		{"config1.json", false},
		{"config2.json", true},
	}

	for i, testCase := range testCases {
		err := client.CommitConfig(testCase.tmpFilename)
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}

func newAdminRPCHTTPServerClient(t *testing.T) (*httptest.Server, *AdminRPCClient, *serverConfig) {
	rpcServer, err := NewAdminRPCServer()
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}

	httpServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		rpcServer.ServeHTTP(w, r)
	}))

	url, err := xnet.ParseURL(httpServer.URL)
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}

	host, err := xnet.ParseHost(url.Host)
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}

	prevGlobalServerConfig := globalServerConfig
	globalServerConfig = newServerConfig()

	rpcClient, err := NewAdminRPCClient(host)
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}

	return httpServer, rpcClient, prevGlobalServerConfig
}

func TestAdminRPCClientSignalService(t *testing.T) {
	httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
	defer httpServer.Close()
	defer func() {
		globalServerConfig = prevGlobalServerConfig
	}()

	testAdminCmdRunnerSignalService(t, rpcClient)
}

func TestAdminRPCClientReInitFormat(t *testing.T) {
	httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
	defer httpServer.Close()
	defer func() {
		globalServerConfig = prevGlobalServerConfig
	}()

	testAdminCmdRunnerReInitFormat(t, rpcClient)
}

func TestAdminRPCClientListLocks(t *testing.T) {
	httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
	defer httpServer.Close()
	defer func() {
		globalServerConfig = prevGlobalServerConfig
	}()

	testAdminCmdRunnerListLocks(t, rpcClient)
}

func TestAdminRPCClientServerInfo(t *testing.T) {
	httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
	defer httpServer.Close()
	defer func() {
		globalServerConfig = prevGlobalServerConfig
	}()

	testAdminCmdRunnerServerInfo(t, rpcClient)
}

func TestAdminRPCClientGetConfig(t *testing.T) {
	httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
	defer httpServer.Close()
	defer func() {
		globalServerConfig = prevGlobalServerConfig
	}()

	testAdminCmdRunnerGetConfig(t, rpcClient)
}

func TestAdminRPCClientWriteTmpConfig(t *testing.T) {
	httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
	defer httpServer.Close()
	defer func() {
		globalServerConfig = prevGlobalServerConfig
	}()

	testAdminCmdRunnerWriteTmpConfig(t, rpcClient)
}

func TestAdminRPCClientCommitConfig(t *testing.T) {
	httpServer, rpcClient, prevGlobalServerConfig := newAdminRPCHTTPServerClient(t)
	defer httpServer.Close()
	defer func() {
		globalServerConfig = prevGlobalServerConfig
	}()

	testAdminCmdRunnerCommitConfig(t, rpcClient)
}

var (
	config1 = []byte(`{
	"version": "13",
	"credential": {
		"accessKey": "minio",
		"secretKey": "minio123"
	},
	"region": "us-east-1",
	"logger": {
		"console": {
			"enable": true,
			"level": "debug"
		},
		"file": {
			"enable": false,
			"fileName": "",
			"level": ""
		}
	},
	"notify": {
		"amqp": {
			"1": {
				"enable": false,
				"url": "",
				"exchange": "",
				"routingKey": "",
				"exchangeType": "",
				"mandatory": false,
				"immediate": false,
				"durable": false,
				"internal": false,
				"noWait": false,
				"autoDeleted": false
			}
		},
		"nats": {
			"1": {
				"enable": false,
				"address": "",
				"subject": "",
				"username": "",
				"password": "",
				"token": "",
				"secure": false,
				"pingInterval": 0,
				"streaming": {
					"enable": false,
					"clusterID": "",
					"clientID": "",
					"async": false,
					"maxPubAcksInflight": 0
				}
			}
		},
		"elasticsearch": {
			"1": {
				"enable": false,
				"url": "",
				"index": ""
			}
		},
		"redis": {
			"1": {
				"enable": false,
				"address": "",
				"password": "",
				"key": ""
			}
		},
		"postgresql": {
			"1": {
				"enable": false,
				"connectionString": "",
				"table": "",
				"host": "",
				"port": "",
				"user": "",
				"password": "",
				"database": ""
			}
		},
		"kafka": {
			"1": {
				"enable": false,
				"brokers": null,
				"topic": ""
			}
		},
		"webhook": {
			"1": {
				"enable": false,
				"endpoint": ""
			}
		}
	}
}
`)
	// diff from config1 - amqp.Enable is true
	config2 = []byte(`{
	"version": "13",
	"credential": {
		"accessKey": "minio",
		"secretKey": "minio123"
	},
	"region": "us-east-1",
	"logger": {
		"console": {
			"enable": true,
			"level": "debug"
		},
		"file": {
			"enable": false,
			"fileName": "",
			"level": ""
		}
	},
	"notify": {
		"amqp": {
			"1": {
				"enable": true,
				"url": "",
				"exchange": "",
				"routingKey": "",
				"exchangeType": "",
				"mandatory": false,
				"immediate": false,
				"durable": false,
				"internal": false,
				"noWait": false,
				"autoDeleted": false
			}
		},
		"nats": {
			"1": {
				"enable": false,
				"address": "",
				"subject": "",
				"username": "",
				"password": "",
				"token": "",
				"secure": false,
				"pingInterval": 0,
				"streaming": {
					"enable": false,
					"clusterID": "",
					"clientID": "",
					"async": false,
					"maxPubAcksInflight": 0
				}
			}
		},
		"elasticsearch": {
			"1": {
				"enable": false,
				"url": "",
				"index": ""
			}
		},
		"redis": {
			"1": {
				"enable": false,
				"address": "",
				"password": "",
				"key": ""
			}
		},
		"postgresql": {
			"1": {
				"enable": false,
				"connectionString": "",
				"table": "",
				"host": "",
				"port": "",
				"user": "",
				"password": "",
				"database": ""
			}
		},
		"kafka": {
			"1": {
				"enable": false,
				"brokers": null,
				"topic": ""
			}
		},
		"webhook": {
			"1": {
				"enable": false,
				"endpoint": ""
			}
		}
	}
}
`)
)

// TestGetValidServerConfig - test for getValidServerConfig.
func TestGetValidServerConfig(t *testing.T) {
	var c1, c2 serverConfig
	err := json.Unmarshal(config1, &c1)
	if err != nil {
		t.Fatalf("json unmarshal of %s failed: %v", string(config1), err)
	}

	err = json.Unmarshal(config2, &c2)
	if err != nil {
		t.Fatalf("json unmarshal of %s failed: %v", string(config2), err)
	}

	// Valid config.
	noErrs := []error{nil, nil, nil, nil}
	serverConfigs := []serverConfig{c1, c2, c1, c1}
	validConfig, err := getValidServerConfig(serverConfigs, noErrs)
	if err != nil {
		t.Errorf("Expected a valid config but received %v instead", err)
	}

	if !reflect.DeepEqual(validConfig, c1) {
		t.Errorf("Expected valid config to be %v but received %v", config1, validConfig)
	}

	// Invalid config - no quorum.
	serverConfigs = []serverConfig{c1, c2, c2, c1}
	_, err = getValidServerConfig(serverConfigs, noErrs)
	if err != errXLWriteQuorum {
		t.Errorf("Expected to fail due to lack of quorum but received %v", err)
	}

	// All errors.
	allErrs := []error{errDiskNotFound, errDiskNotFound, errDiskNotFound, errDiskNotFound}
	serverConfigs = []serverConfig{{}, {}, {}, {}}
	_, err = getValidServerConfig(serverConfigs, allErrs)
	if err != errXLWriteQuorum {
		t.Errorf("Expected to fail due to lack of quorum but received %v", err)
	}
}
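getValidServerConfig, exercised above, implements a simple majority rule: a config is accepted only when at least a write quorum of servers (N/2+1, so 3 of 4 in the test) report identical content, which is why the 2-2 split fails with errXLWriteQuorum. A standalone sketch of that counting logic under those assumptions (pickQuorumConfig is a hypothetical helper, not the actual implementation, and it assumes serverConfig round-trips through JSON):

	// pickQuorumConfig returns the config that at least quorum servers
	// agree on, or errXLWriteQuorum when no value reaches quorum.
	func pickQuorumConfig(configs []serverConfig, quorum int) (serverConfig, error) {
		counts := make(map[string]int)
		for _, cfg := range configs {
			// Serialize each config so identical configs count under one key.
			b, err := json.Marshal(cfg)
			if err != nil {
				continue
			}
			counts[string(b)]++
		}
		for serialized, n := range counts {
			if n >= quorum {
				var cfg serverConfig
				if err := json.Unmarshal([]byte(serialized), &cfg); err == nil {
					return cfg, nil
				}
			}
		}
		return serverConfig{}, errXLWriteQuorum
	}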
@@ -23,6 +23,8 @@ import (
const (
	// Response request id.
	responseRequestIDKey = "x-amz-request-id"
	// Deployment id.
	responseDeploymentIDKey = "x-minio-deployment-id"
)

// ObjectIdentifier carries key name for the object to delete.
@@ -22,8 +22,10 @@ import (
	"fmt"
	"net/http"

	"github.com/coreos/etcd/client"
	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/dns"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/hash"
)
@@ -40,8 +42,8 @@ type APIErrorResponse struct {
	XMLName    xml.Name `xml:"Error" json:"-"`
	Code       string
	Message    string
	Key        string
	BucketName string
	Key        string `xml:"Key,omitempty" json:"Key,omitempty"`
	BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"`
	Resource   string
	RequestID  string `xml:"RequestId" json:"RequestId"`
	HostID     string `xml:"HostId" json:"HostId"`
@@ -83,6 +85,7 @@ const (
	ErrNoSuchBucketPolicy
	ErrNoSuchKey
	ErrNoSuchUpload
	ErrNoSuchVersion
	ErrNotImplemented
	ErrPreconditionFailed
	ErrRequestTimeTooSkewed
@@ -127,8 +130,12 @@ const (
	ErrMaximumExpires
	ErrSlowDown
	ErrInvalidPrefixMarker
	ErrBadRequest
	// Add new error codes here.

	// SSE-S3 related API errors
	ErrInvalidEncryptionMethod

	// Server-Side-Encryption (with Customer provided key) related API errors.
	ErrInsecureSSECustomerRequest
	ErrSSEMultipartEncrypted
@@ -140,6 +147,12 @@ const (
	ErrMissingSSECustomerKeyMD5
	ErrSSECustomerKeyMD5Mismatch
	ErrInvalidSSECustomerParameters
	ErrIncompatibleEncryptionMethod
	ErrKMSNotConfigured
	ErrKMSAuthFailure

	ErrNoAccessKey
	ErrInvalidToken

	// Bucket notification related errors.
	ErrEventNotification
@@ -169,7 +182,6 @@ const (
	ErrInvalidResourceName
	ErrServerNotInitialized
	ErrOperationTimedOut
	ErrPartsSizeUnequal
	ErrInvalidRequest
	// Minio storage class error codes
	ErrInvalidStorageClass
@@ -179,14 +191,19 @@ const (
	// new error codes here.

	ErrMalformedJSON
	ErrAdminNoSuchUser
	ErrAdminNoSuchPolicy
	ErrAdminInvalidArgument
	ErrAdminInvalidAccessKey
	ErrAdminInvalidSecretKey
	ErrAdminConfigNoQuorum
	ErrAdminConfigTooLarge
	ErrAdminConfigBadJSON
	ErrAdminConfigDuplicateKeys
	ErrAdminCredentialsMismatch
	ErrInsecureClientRequest
	ErrObjectTampered

	ErrHealNotImplemented
	ErrHealNoSuchProcess
	ErrHealInvalidClientToken
@@ -194,6 +211,96 @@ const (
	ErrHealAlreadyRunning
	ErrHealOverlappingPaths
	ErrIncorrectContinuationToken

	// S3 Select errors
	ErrEmptyRequestBody
	ErrUnsupportedFunction
	ErrInvalidExpressionType
	ErrBusy
	ErrUnauthorizedAccess
	ErrExpressionTooLong
	ErrIllegalSQLFunctionArgument
	ErrInvalidKeyPath
	ErrInvalidCompressionFormat
	ErrInvalidFileHeaderInfo
	ErrInvalidJSONType
	ErrInvalidQuoteFields
	ErrInvalidRequestParameter
	ErrInvalidDataType
	ErrInvalidTextEncoding
	ErrInvalidDataSource
	ErrInvalidTableAlias
	ErrMissingRequiredParameter
	ErrObjectSerializationConflict
	ErrUnsupportedSQLOperation
	ErrUnsupportedSQLStructure
	ErrUnsupportedSyntax
	ErrUnsupportedRangeHeader
	ErrLexerInvalidChar
	ErrLexerInvalidOperator
	ErrLexerInvalidLiteral
	ErrLexerInvalidIONLiteral
	ErrParseExpectedDatePart
	ErrParseExpectedKeyword
	ErrParseExpectedTokenType
	ErrParseExpected2TokenTypes
	ErrParseExpectedNumber
	ErrParseExpectedRightParenBuiltinFunctionCall
	ErrParseExpectedTypeName
	ErrParseExpectedWhenClause
	ErrParseUnsupportedToken
	ErrParseUnsupportedLiteralsGroupBy
	ErrParseExpectedMember
	ErrParseUnsupportedSelect
	ErrParseUnsupportedCase
	ErrParseUnsupportedCaseClause
	ErrParseUnsupportedAlias
	ErrParseUnsupportedSyntax
	ErrParseUnknownOperator
	ErrParseMissingIdentAfterAt
	ErrParseUnexpectedOperator
	ErrParseUnexpectedTerm
	ErrParseUnexpectedToken
	ErrParseUnexpectedKeyword
	ErrParseExpectedExpression
	ErrParseExpectedLeftParenAfterCast
	ErrParseExpectedLeftParenValueConstructor
	ErrParseExpectedLeftParenBuiltinFunctionCall
	ErrParseExpectedArgumentDelimiter
	ErrParseCastArity
	ErrParseInvalidTypeParam
	ErrParseEmptySelect
	ErrParseSelectMissingFrom
	ErrParseExpectedIdentForGroupName
	ErrParseExpectedIdentForAlias
	ErrParseUnsupportedCallWithStar
	ErrParseNonUnaryAgregateFunctionCall
	ErrParseMalformedJoin
	ErrParseExpectedIdentForAt
	ErrParseAsteriskIsNotAloneInSelectList
	ErrParseCannotMixSqbAndWildcardInSelectList
	ErrParseInvalidContextForWildcardInSelectList
	ErrIncorrectSQLFunctionArgumentType
	ErrValueParseFailure
	ErrEvaluatorInvalidArguments
	ErrIntegerOverflow
	ErrLikeInvalidInputs
	ErrCastFailed
	ErrInvalidCast
	ErrEvaluatorInvalidTimestampFormatPattern
	ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing
	ErrEvaluatorTimestampFormatPatternDuplicateFields
	ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch
	ErrEvaluatorUnterminatedTimestampFormatPatternToken
	ErrEvaluatorInvalidTimestampFormatPatternToken
	ErrEvaluatorInvalidTimestampFormatPatternSymbol
	ErrEvaluatorBindingDoesNotExist
	ErrMissingHeaders
	ErrInvalidColumnIndex

	ErrAdminConfigNotificationTargetsFailed
	ErrAdminProfilerNotEnabled
	ErrInvalidDecompressedSize
)
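The block above is an iota-style enumeration, so the numeric value of each code is positional rather than explicit, which is why the "Add new error codes here" markers matter. A compact, standalone illustration of why insertion order changes values (not part of the change itself):

	type code int

	const (
		errNone code = iota // 0
		errFirst            // 1
		errSecond           // 2; inserting a new code above this line renumbers it
	)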
// error code to APIError structure, these fields carry respective
@@ -339,6 +446,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		Description:    "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrNoSuchVersion: {
		Code:           "NoSuchVersion",
		Description:    "Indicates that the version ID specified in the request does not match an existing version.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrNotImplemented: {
		Code:           "NotImplemented",
		Description:    "A header you provided implies functionality that is not implemented",
@@ -541,6 +653,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		Description:    "Invalid marker prefix combination",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBadRequest: {
		Code:           "BadRequest",
		Description:    "400 BadRequest",
		HTTPStatusCode: http.StatusBadRequest,
	},

	// FIXME: Actual XML error response also contains the header which missed in list of signed header parameters.
	ErrUnsignedHeaders: {
@@ -630,6 +747,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		Description:    "Your metadata headers exceed the maximum allowed metadata size.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidEncryptionMethod: {
		Code:           "InvalidRequest",
		Description:    "The encryption method specified is not supported",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInsecureSSECustomerRequest: {
		Code:           "InvalidRequest",
		Description:    "Requests specifying Server Side Encryption with Customer provided keys must be made over a secure connection.",
@@ -680,6 +802,31 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		Description:    "The provided encryption parameters did not match the ones used originally.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrIncompatibleEncryptionMethod: {
		Code:           "InvalidArgument",
		Description:    "Server side encryption specified with both SSE-C and SSE-S3 headers",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrKMSNotConfigured: {
		Code:           "InvalidArgument",
		Description:    "Server side encryption specified but KMS is not configured",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrKMSAuthFailure: {
		Code:           "InvalidArgument",
		Description:    "Server side encryption specified but KMS authorization failed",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrNoAccessKey: {
		Code:           "AccessDenied",
		Description:    "No AWSAccessKey was presented",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrInvalidToken: {
		Code:           "InvalidTokenId",
		Description:    "The security token included in the request is invalid",
		HTTPStatusCode: http.StatusForbidden,
	},

	/// S3 extensions.
	ErrContentSHA256Mismatch: {
@@ -692,7 +839,7 @@ var errorCodeResponse = map[APIErrorCode]APIError{
	ErrStorageFull: {
		Code:           "XMinioStorageFull",
		Description:    "Storage backend has reached its minimum free disk threshold. Please delete a few objects to proceed.",
		HTTPStatusCode: http.StatusInternalServerError,
		HTTPStatusCode: http.StatusInsufficientStorage,
	},
	ErrRequestBodyParse: {
		Code:           "XMinioRequestBodyParse",
@@ -739,6 +886,21 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		Description:    "The JSON you provided was not well-formed or did not validate against our published format.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminNoSuchUser: {
		Code:           "XMinioAdminNoSuchUser",
		Description:    "The specified user does not exist.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrAdminNoSuchPolicy: {
		Code:           "XMinioAdminNoSuchPolicy",
		Description:    "The canned policy does not exist.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrAdminInvalidArgument: {
		Code:           "XMinioAdminInvalidArgument",
		Description:    "Invalid arguments specified.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminInvalidAccessKey: {
		Code:           "XMinioAdminInvalidAccessKey",
		Description:    "The access key is invalid.",
@@ -757,14 +919,29 @@ var errorCodeResponse = map[APIErrorCode]APIError{
	ErrAdminConfigTooLarge: {
		Code: "XMinioAdminConfigTooLarge",
		Description: fmt.Sprintf("Configuration data provided exceeds the allowed maximum of %d bytes",
			maxConfigJSONSize),
			maxEConfigJSONSize),
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminConfigBadJSON: {
		Code:           "XMinioAdminConfigBadJSON",
		Description:    "JSON configuration provided is of incorrect format",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminConfigDuplicateKeys: {
		Code:           "XMinioAdminConfigDuplicateKeys",
		Description:    "JSON configuration provided has objects with duplicate keys",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminConfigNotificationTargetsFailed: {
		Code:           "XMinioAdminNotificationTargetsTestFailed",
		Description:    "Configuration update failed due to an unsuccessful attempt to connect to one or more notification servers",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminProfilerNotEnabled: {
		Code:           "XMinioAdminProfilerNotEnabled",
		Description:    "Unable to perform the requested operation because profiling is not enabled",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminCredentialsMismatch: {
		Code:           "XMinioAdminCredentialsMismatch",
		Description:    "Credentials in config mismatch with server environment variables",
@@ -785,11 +962,6 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		Description:    "Your metadata headers are not supported.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrPartsSizeUnequal: {
		Code:           "XMinioPartsSizeUnequal",
		Description:    "All parts except the last part should be of the same size.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrObjectTampered: {
		Code:           "XMinioObjectTampered",
		Description:    errObjectTampered.Error(),
@@ -800,6 +972,7 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		Description:    "X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds",
		HTTPStatusCode: http.StatusBadRequest,
	},

	// Generic Invalid-Request error. Should be used for response errors only for unlikely
	// corner case errors for which introducing new APIErrorCode is not worth it. LogIf()
	// should be used to log the error at the source of the error for debugging purposes.
@@ -848,21 +1021,455 @@ var errorCodeResponse = map[APIErrorCode]APIError{
		Description:    "The continuation token provided is incorrect",
		HTTPStatusCode: http.StatusBadRequest,
	},

	// S3 Select API errors
	ErrEmptyRequestBody: {
		Code:           "EmptyRequestBody",
		Description:    "Request body cannot be empty.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrUnsupportedFunction: {
		Code:           "UnsupportedFunction",
		Description:    "Encountered an unsupported SQL function.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidDataSource: {
		Code:           "InvalidDataSource",
		Description:    "Invalid data source type. Only CSV and JSON are supported at this time.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidExpressionType: {
		Code:           "InvalidExpressionType",
		Description:    "The ExpressionType is invalid. Only SQL expressions are supported at this time.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBusy: {
		Code:           "Busy",
		Description:    "The service is unavailable. Please retry.",
		HTTPStatusCode: http.StatusServiceUnavailable,
	},
	ErrUnauthorizedAccess: {
		Code:           "UnauthorizedAccess",
		Description:    "You are not authorized to perform this operation",
		HTTPStatusCode: http.StatusUnauthorized,
	},
	ErrExpressionTooLong: {
		Code:           "ExpressionTooLong",
		Description:    "The SQL expression is too long: The maximum byte-length for the SQL expression is 256 KB.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrIllegalSQLFunctionArgument: {
		Code:           "IllegalSqlFunctionArgument",
		Description:    "Illegal argument was used in the SQL function.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidKeyPath: {
		Code:           "InvalidKeyPath",
		Description:    "Key path in the SQL expression is invalid.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidCompressionFormat: {
		Code:           "InvalidCompressionFormat",
		Description:    "The file is not in a supported compression format. Only GZIP is supported at this time.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidFileHeaderInfo: {
		Code:           "InvalidFileHeaderInfo",
		Description:    "The FileHeaderInfo is invalid. Only NONE, USE, and IGNORE are supported.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidJSONType: {
		Code:           "InvalidJsonType",
		Description:    "The JsonType is invalid. Only DOCUMENT and LINES are supported at this time.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidQuoteFields: {
		Code:           "InvalidQuoteFields",
		Description:    "The QuoteFields is invalid. Only ALWAYS and ASNEEDED are supported.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidRequestParameter: {
		Code:           "InvalidRequestParameter",
		Description:    "The value of a parameter in SelectRequest element is invalid. Check the service API documentation and try again.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidDataType: {
		Code:           "InvalidDataType",
		Description:    "The SQL expression contains an invalid data type.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidTextEncoding: {
		Code:           "InvalidTextEncoding",
		Description:    "Invalid encoding type. Only UTF-8 encoding is supported at this time.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidTableAlias: {
		Code:           "InvalidTableAlias",
		Description:    "The SQL expression contains an invalid table alias.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMissingRequiredParameter: {
		Code:           "MissingRequiredParameter",
		Description:    "The SelectRequest entity is missing a required parameter. Check the service documentation and try again.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrObjectSerializationConflict: {
		Code:           "ObjectSerializationConflict",
		Description:    "The SelectRequest entity can only contain one of CSV or JSON. Check the service documentation and try again.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrUnsupportedSQLOperation: {
		Code:           "UnsupportedSqlOperation",
		Description:    "Encountered an unsupported SQL operation.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrUnsupportedSQLStructure: {
		Code:           "UnsupportedSqlStructure",
		Description:    "Encountered an unsupported SQL structure. Check the SQL Reference.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrUnsupportedSyntax: {
		Code:           "UnsupportedSyntax",
		Description:    "Encountered invalid syntax.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrUnsupportedRangeHeader: {
		Code:           "UnsupportedRangeHeader",
		Description:    "Range header is not supported for this operation.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrLexerInvalidChar: {
		Code:           "LexerInvalidChar",
		Description:    "The SQL expression contains an invalid character.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrLexerInvalidOperator: {
		Code:           "LexerInvalidOperator",
		Description:    "The SQL expression contains an invalid operator.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrLexerInvalidLiteral: {
		Code:           "LexerInvalidLiteral",
		Description:    "The SQL expression contains an invalid literal.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrLexerInvalidIONLiteral: {
		Code:           "LexerInvalidIONLiteral",
		Description:    "The SQL expression contains an invalid ION literal.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedDatePart: {
		Code:           "ParseExpectedDatePart",
		Description:    "Did not find the expected date part in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedKeyword: {
		Code:           "ParseExpectedKeyword",
		Description:    "Did not find the expected keyword in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedTokenType: {
		Code:           "ParseExpectedTokenType",
		Description:    "Did not find the expected token in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpected2TokenTypes: {
		Code:           "ParseExpected2TokenTypes",
		Description:    "Did not find the expected token in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedNumber: {
		Code:           "ParseExpectedNumber",
		Description:    "Did not find the expected number in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedRightParenBuiltinFunctionCall: {
		Code:           "ParseExpectedRightParenBuiltinFunctionCall",
		Description:    "Did not find the expected right parenthesis character in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedTypeName: {
		Code:           "ParseExpectedTypeName",
		Description:    "Did not find the expected type name in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedWhenClause: {
		Code:           "ParseExpectedWhenClause",
		Description:    "Did not find the expected WHEN clause in the SQL expression. CASE is not supported.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnsupportedToken: {
		Code:           "ParseUnsupportedToken",
		Description:    "The SQL expression contains an unsupported token.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnsupportedLiteralsGroupBy: {
		Code:           "ParseUnsupportedLiteralsGroupBy",
		Description:    "The SQL expression contains an unsupported use of GROUP BY.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedMember: {
		Code:           "ParseExpectedMember",
		Description:    "The SQL expression contains an unsupported use of MEMBER.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnsupportedSelect: {
		Code:           "ParseUnsupportedSelect",
		Description:    "The SQL expression contains an unsupported use of SELECT.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnsupportedCase: {
		Code:           "ParseUnsupportedCase",
		Description:    "The SQL expression contains an unsupported use of CASE.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnsupportedCaseClause: {
		Code:           "ParseUnsupportedCaseClause",
		Description:    "The SQL expression contains an unsupported use of CASE.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnsupportedAlias: {
		Code:           "ParseUnsupportedAlias",
		Description:    "The SQL expression contains an unsupported use of ALIAS.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnsupportedSyntax: {
		Code:           "ParseUnsupportedSyntax",
		Description:    "The SQL expression contains unsupported syntax.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnknownOperator: {
		Code:           "ParseUnknownOperator",
		Description:    "The SQL expression contains an invalid operator.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseMissingIdentAfterAt: {
		Code:           "ParseMissingIdentAfterAt",
		Description:    "Did not find the expected identifier after the @ symbol in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnexpectedOperator: {
		Code:           "ParseUnexpectedOperator",
		Description:    "The SQL expression contains an unexpected operator.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnexpectedTerm: {
		Code:           "ParseUnexpectedTerm",
		Description:    "The SQL expression contains an unexpected term.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnexpectedToken: {
		Code:           "ParseUnexpectedToken",
		Description:    "The SQL expression contains an unexpected token.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnexpectedKeyword: {
		Code:           "ParseUnexpectedKeyword",
		Description:    "The SQL expression contains an unexpected keyword.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedExpression: {
		Code:           "ParseExpectedExpression",
		Description:    "Did not find the expected SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedLeftParenAfterCast: {
		Code:           "ParseExpectedLeftParenAfterCast",
		Description:    "Did not find the expected left parenthesis in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedLeftParenValueConstructor: {
		Code:           "ParseExpectedLeftParenValueConstructor",
		Description:    "Did not find the expected left parenthesis in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedLeftParenBuiltinFunctionCall: {
		Code:           "ParseExpectedLeftParenBuiltinFunctionCall",
		Description:    "Did not find the expected left parenthesis in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedArgumentDelimiter: {
		Code:           "ParseExpectedArgumentDelimiter",
		Description:    "Did not find the expected argument delimiter in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseCastArity: {
		Code:           "ParseCastArity",
		Description:    "The SQL expression CAST has incorrect arity.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseInvalidTypeParam: {
		Code:           "ParseInvalidTypeParam",
		Description:    "The SQL expression contains an invalid parameter value.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseEmptySelect: {
		Code:           "ParseEmptySelect",
		Description:    "The SQL expression contains an empty SELECT.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseSelectMissingFrom: {
		Code:           "ParseSelectMissingFrom",
		Description:    "The SQL expression is missing the FROM clause.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedIdentForGroupName: {
		Code:           "ParseExpectedIdentForGroupName",
		Description:    "GROUP is not supported in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedIdentForAlias: {
		Code:           "ParseExpectedIdentForAlias",
		Description:    "Did not find the expected identifier for the alias in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseUnsupportedCallWithStar: {
		Code:           "ParseUnsupportedCallWithStar",
		Description:    "Only COUNT with (*) as a parameter is supported in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseNonUnaryAgregateFunctionCall: {
		Code:           "ParseNonUnaryAgregateFunctionCall",
		Description:    "Only one argument is supported for aggregate functions in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseMalformedJoin: {
		Code:           "ParseMalformedJoin",
		Description:    "JOIN is not supported in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseExpectedIdentForAt: {
		Code:           "ParseExpectedIdentForAt",
		Description:    "Did not find the expected identifier for AT name in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseAsteriskIsNotAloneInSelectList: {
		Code:           "ParseAsteriskIsNotAloneInSelectList",
		Description:    "Other expressions are not allowed in the SELECT list when '*' is used without dot notation in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseCannotMixSqbAndWildcardInSelectList: {
		Code:           "ParseCannotMixSqbAndWildcardInSelectList",
		Description:    "Cannot mix [] and * in the same expression in a SELECT list in SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrParseInvalidContextForWildcardInSelectList: {
		Code:           "ParseInvalidContextForWildcardInSelectList",
		Description:    "Invalid use of * in SELECT list in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrIncorrectSQLFunctionArgumentType: {
		Code:           "IncorrectSqlFunctionArgumentType",
		Description:    "Incorrect type of arguments in function call in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrValueParseFailure: {
		Code:           "ValueParseFailure",
		Description:    "Time stamp parse failure in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEvaluatorInvalidArguments: {
		Code:           "EvaluatorInvalidArguments",
		Description:    "Incorrect number of arguments in the function call in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrIntegerOverflow: {
		Code:           "IntegerOverflow",
		Description:    "Int overflow or underflow in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrLikeInvalidInputs: {
		Code:           "LikeInvalidInputs",
		Description:    "Invalid argument given to the LIKE clause in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrCastFailed: {
		Code:           "CastFailed",
		Description:    "Attempt to convert from one data type to another using CAST failed in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidCast: {
		Code:           "InvalidCast",
		Description:    "Attempt to convert from one data type to another using CAST failed in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEvaluatorInvalidTimestampFormatPattern: {
		Code:           "EvaluatorInvalidTimestampFormatPattern",
		Description:    "Time stamp format pattern requires additional fields in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing: {
		Code:           "EvaluatorInvalidTimestampFormatPatternSymbolForParsing",
		Description:    "Time stamp format pattern contains a valid format symbol that cannot be applied to time stamp parsing in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEvaluatorTimestampFormatPatternDuplicateFields: {
		Code:           "EvaluatorTimestampFormatPatternDuplicateFields",
		Description:    "Time stamp format pattern contains multiple format specifiers representing the time stamp field in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch: {
		Code:           "EvaluatorTimestampFormatPatternHourClockAmPmMismatch",
		Description:    "Time stamp format pattern contains a mismatched hour-clock and AM/PM field in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEvaluatorUnterminatedTimestampFormatPatternToken: {
		Code:           "EvaluatorUnterminatedTimestampFormatPatternToken",
		Description:    "Time stamp format pattern contains unterminated token in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEvaluatorInvalidTimestampFormatPatternToken: {
		Code:           "EvaluatorInvalidTimestampFormatPatternToken",
		Description:    "Time stamp format pattern contains an invalid token in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEvaluatorInvalidTimestampFormatPatternSymbol: {
		Code:           "EvaluatorInvalidTimestampFormatPatternSymbol",
		Description:    "Time stamp format pattern contains an invalid symbol in the SQL expression.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrEvaluatorBindingDoesNotExist: {
		Code:           "ErrEvaluatorBindingDoesNotExist",
		Description:    "A column name or a path provided does not exist in the SQL expression",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMissingHeaders: {
		Code:           "MissingHeaders",
		Description:    "Some headers in the query are missing from the file. Check the file and try again.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidColumnIndex: {
		Code:           "InvalidColumnIndex",
		Description:    "The column index is invalid. Please check the service documentation and try again.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidDecompressedSize: {
		Code:           "XMinioInvalidDecompressedSize",
		Description:    "The data provided is unfit for decompression",
		HTTPStatusCode: http.StatusBadRequest,
	},
	// Add your error structure here.
}
// toAPIErrorCode - converts embedded errors. Convenience
// function written to handle all cases where we have known types of
// errors returned by underlying layers.
func toAPIErrorCode(err error) (apiErr APIErrorCode) {
func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
	if err == nil {
		return ErrNone
	}

	// Verify if the underlying error is signature mismatch.
	switch err {
	case errInvalidArgument:
		apiErr = ErrAdminInvalidArgument
	case errNoSuchUser:
		apiErr = ErrAdminNoSuchUser
	case errNoSuchPolicy:
		apiErr = ErrAdminNoSuchPolicy
	case errSignatureMismatch:
		apiErr = ErrSignatureDoesNotMatch
	case errInvalidRange:
		apiErr = ErrInvalidRange
	case errDataTooLarge:
		apiErr = ErrEntityTooLarge
	case errDataTooSmall:
@@ -872,17 +1479,19 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
	case auth.ErrInvalidSecretKeyLength:
		apiErr = ErrAdminInvalidSecretKey
	// SSE errors
	case errInsecureSSERequest:
		apiErr = ErrInsecureSSECustomerRequest
	case errInvalidSSEAlgorithm:
	case errInvalidEncryptionParameters:
		apiErr = ErrInvalidEncryptionParameters
	case crypto.ErrInvalidEncryptionMethod:
		apiErr = ErrInvalidEncryptionMethod
	case crypto.ErrInvalidCustomerAlgorithm:
		apiErr = ErrInvalidSSECustomerAlgorithm
	case errInvalidSSEKey:
	case crypto.ErrInvalidCustomerKey:
		apiErr = ErrInvalidSSECustomerKey
	case errMissingSSEKey:
	case crypto.ErrMissingCustomerKey:
		apiErr = ErrMissingSSECustomerKey
	case errMissingSSEKeyMD5:
	case crypto.ErrMissingCustomerKeyMD5:
		apiErr = ErrMissingSSECustomerKeyMD5
	case errSSEKeyMD5Mismatch:
	case crypto.ErrCustomerKeyMD5Mismatch:
		apiErr = ErrSSECustomerKeyMD5Mismatch
	case errObjectTampered:
		apiErr = ErrObjectTampered
@@ -890,12 +1499,24 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
		apiErr = ErrSSEEncryptedObject
	case errInvalidSSEParameters:
		apiErr = ErrInvalidSSECustomerParameters
	case errSSEKeyMismatch:
	case crypto.ErrInvalidCustomerKey, crypto.ErrSecretKeyMismatch:
		apiErr = ErrAccessDenied // no access without correct key
	case context.Canceled, context.DeadlineExceeded:
	case crypto.ErrIncompatibleEncryptionMethod:
		apiErr = ErrIncompatibleEncryptionMethod
	case errKMSNotConfigured:
		apiErr = ErrKMSNotConfigured
	case crypto.ErrKMSAuthLogin:
		apiErr = ErrKMSAuthFailure
	case errOperationTimedOut, context.Canceled, context.DeadlineExceeded:
		apiErr = ErrOperationTimedOut
	}

	// Compression errors
	switch err {
	case errInvalidDecompressedSize:
		apiErr = ErrInvalidDecompressedSize
	}

	if apiErr != ErrNone {
		// If there was a match in the above switch case.
		return apiErr
@@ -903,10 +1524,8 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {

	// etcd specific errors, a key is always a bucket for us, so return
	// ErrNoSuchBucket in such a case.
	if e, ok := err.(*client.Error); ok {
		if e.Code == client.ErrorCodeKeyNotFound {
			return ErrNoSuchBucket
		}
	if err == dns.ErrNoEntriesFound {
		return ErrNoSuchBucket
	}

	switch err.(type) {
@@ -972,8 +1591,6 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
		apiErr = ErrEntityTooLarge
	case UnsupportedMetadata:
		apiErr = ErrUnsupportedMetadata
	case PartsSizeUnequal:
		apiErr = ErrPartsSizeUnequal
	case BucketPolicyNotFound:
		apiErr = ErrNoSuchBucketPolicy
	case *event.ErrInvalidEventName:
@@ -1000,8 +1617,14 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
		apiErr = ErrUnsupportedNotification
	case BackendDown:
		apiErr = ErrBackendDown
	case crypto.Error:
		apiErr = ErrObjectTampered
	default:
		apiErr = ErrInternalError
		// Make sure to log the errors which we cannot translate
		// to meaningful S3 API errors. This is added to aid in
		// debugging unexpected/unhandled errors.
		logger.LogIf(ctx, err)
	}

	return apiErr
@@ -1009,17 +1632,20 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {

// getAPIError provides API Error for input API error code.
func getAPIError(code APIErrorCode) APIError {
	return errorCodeResponse[code]
	if apiErr, ok := errorCodeResponse[code]; ok {
		return apiErr
	}
	return errorCodeResponse[ErrInternalError]
}

// getErrorResponse gets in standard error and resource value and
// provides an encodable populated response values
func getAPIErrorResponse(err APIError, resource string) APIErrorResponse {
func getAPIErrorResponse(err APIError, resource, requestid string) APIErrorResponse {
	return APIErrorResponse{
		Code:      err.Code,
		Message:   err.Description,
		Resource:  resource,
		RequestID: "3L137",
		RequestID: requestid,
		HostID:    "3L137",
	}
}
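The rewritten getAPIError above no longer returns a zero-value APIError on a map miss; it degrades to a well-formed ErrInternalError response, and getAPIErrorResponse now threads the real request ID through instead of the hard-coded "3L137" placeholder. A small illustration of the combined effect (the code value 9999 is a made-up, unregistered code, used only to trigger the fallback):

	// Before: errorCodeResponse[9999] returned APIError{} — empty Code,
	// empty Description, and a zero HTTP status, rendering as malformed XML.
	// After: the same lookup yields the registered InternalError entry.
	apiErr := getAPIError(APIErrorCode(9999))
	resp := getAPIErrorResponse(apiErr, "/mybucket/myobject", mustGetRequestID(UTCNow()))
	_ = resp // resp.RequestID now carries the per-request ID from the response headers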
@@ -17,9 +17,11 @@
package cmd

import (
	"context"
	"errors"
	"testing"

	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/pkg/hash"
)

@@ -51,12 +53,11 @@ var toAPIErrorCodeTests = []struct {
	{err: errSignatureMismatch, errCode: ErrSignatureDoesNotMatch},

	// SSE-C errors
	{err: errInsecureSSERequest, errCode: ErrInsecureSSECustomerRequest},
	{err: errInvalidSSEAlgorithm, errCode: ErrInvalidSSECustomerAlgorithm},
	{err: errMissingSSEKey, errCode: ErrMissingSSECustomerKey},
	{err: errInvalidSSEKey, errCode: ErrInvalidSSECustomerKey},
	{err: errMissingSSEKeyMD5, errCode: ErrMissingSSECustomerKeyMD5},
	{err: errSSEKeyMD5Mismatch, errCode: ErrSSECustomerKeyMD5Mismatch},
	{err: crypto.ErrInvalidCustomerAlgorithm, errCode: ErrInvalidSSECustomerAlgorithm},
	{err: crypto.ErrMissingCustomerKey, errCode: ErrMissingSSECustomerKey},
	{err: crypto.ErrInvalidCustomerKey, errCode: ErrInvalidSSECustomerKey},
	{err: crypto.ErrMissingCustomerKeyMD5, errCode: ErrMissingSSECustomerKeyMD5},
	{err: crypto.ErrCustomerKeyMD5Mismatch, errCode: ErrSSECustomerKeyMD5Mismatch},
	{err: errObjectTampered, errCode: ErrObjectTampered},

	{err: nil, errCode: ErrNone},
@@ -64,8 +65,9 @@ }

func TestAPIErrCode(t *testing.T) {
	ctx := context.Background()
	for i, testCase := range toAPIErrorCodeTests {
		errCode := toAPIErrorCode(testCase.err)
		errCode := toAPIErrorCode(ctx, testCase.err)
		if errCode != testCase.errCode {
			t.Errorf("Test %d: Expected error code %d, got %d", i+1, testCase.errCode, errCode)
		}
@@ -24,6 +24,8 @@ import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
)
|
||||
|
||||
// Returns a hexadecimal representation of time at the
|
||||
@@ -34,15 +36,16 @@ func mustGetRequestID(t time.Time) string {
|
||||
|
||||
// Write http common headers
|
||||
func setCommonHeaders(w http.ResponseWriter) {
|
||||
// Set unique request ID for each reply.
|
||||
w.Header().Set(responseRequestIDKey, mustGetRequestID(UTCNow()))
|
||||
w.Header().Set("Server", globalServerUserAgent)
|
||||
w.Header().Set("Server", "Minio/"+ReleaseTag)
|
||||
// Set `x-amz-bucket-region` only if region is set on the server
|
||||
// by default minio uses an empty region.
|
||||
if region := globalServerConfig.GetRegion(); region != "" {
|
||||
w.Header().Set("X-Amz-Bucket-Region", region)
|
||||
}
|
||||
w.Header().Set("Accept-Ranges", "bytes")
|
||||
|
||||
// Remove sensitive information
|
||||
crypto.RemoveSensitiveHeaders(w.Header())
|
||||
}
|
||||
|
||||
// Encodes the response headers into XML format.
|
||||
@@ -63,13 +66,10 @@ func encodeResponseJSON(response interface{}) []byte {
|
||||
}
|
||||
|
||||
// Write object header
|
||||
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, contentRange *httpRange) {
|
||||
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec) (err error) {
|
||||
// set common headers
|
||||
setCommonHeaders(w)
|
||||
|
||||
// Set content length.
|
||||
w.Header().Set("Content-Length", strconv.FormatInt(objInfo.Size, 10))
|
||||
|
||||
// Set last modified time.
|
||||
lastModified := objInfo.ModTime.UTC().Format(http.TimeFormat)
|
||||
w.Header().Set("Last-Modified", lastModified)
|
||||
@@ -97,11 +97,34 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, contentRange *h
|
||||
w.Header().Set(k, v)
|
||||
}
|
||||
|
||||
// for providing ranged content
|
||||
if contentRange != nil && contentRange.offsetBegin > -1 {
|
||||
// Override content-length
|
||||
w.Header().Set("Content-Length", strconv.FormatInt(contentRange.getLength(), 10))
|
||||
w.Header().Set("Content-Range", contentRange.String())
|
||||
w.WriteHeader(http.StatusPartialContent)
|
||||
var totalObjectSize int64
|
||||
switch {
|
||||
case crypto.IsEncrypted(objInfo.UserDefined):
|
||||
totalObjectSize, err = objInfo.DecryptedSize()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case objInfo.IsCompressed():
|
||||
totalObjectSize = objInfo.GetActualSize()
|
||||
if totalObjectSize < 0 {
|
||||
return errInvalidDecompressedSize
|
||||
}
|
||||
default:
|
||||
totalObjectSize = objInfo.Size
|
||||
}
|
||||
|
||||
// for providing ranged content
|
||||
start, rangeLen, err := rs.GetOffsetLength(totalObjectSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set content length.
|
||||
w.Header().Set("Content-Length", strconv.FormatInt(rangeLen, 10))
|
||||
if rs != nil {
|
||||
contentRange := fmt.Sprintf("bytes %d-%d/%d", start, start+rangeLen-1, totalObjectSize)
|
||||
w.Header().Set("Content-Range", contentRange)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
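The rewritten range logic first resolves the logical object size (decrypted or decompressed where applicable) and only then derives the offset and length, so Content-Range always reports the size the client actually observes. The header math in isolation (per RFC 7233, the last byte position is inclusive):

// For a 100-byte object and a range covering bytes 10..19 this
// yields "bytes 10-19/100".
func contentRangeHeader(start, rangeLen, totalObjectSize int64) string {
	return fmt.Sprintf("bytes %d-%d/%d", start, start+rangeLen-1, totalObjectSize)
}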
|
||||
|
||||
@@ -22,15 +22,22 @@ import (
|
||||
)
|
||||
|
||||
// Parse bucket url queries
|
||||
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType string) {
|
||||
prefix = values.Get("prefix")
|
||||
marker = values.Get("marker")
|
||||
delimiter = values.Get("delimiter")
|
||||
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType string, errCode APIErrorCode) {
|
||||
errCode = ErrNone
|
||||
|
||||
if values.Get("max-keys") != "" {
|
||||
maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
|
||||
var err error
|
||||
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
|
||||
errCode = ErrInvalidMaxKeys
|
||||
return
|
||||
}
|
||||
} else {
|
||||
maxkeys = maxObjectList
|
||||
}
|
||||
|
||||
prefix = values.Get("prefix")
|
||||
marker = values.Get("marker")
|
||||
delimiter = values.Get("delimiter")
|
||||
encodingType = values.Get("encoding-type")
|
||||
return
|
||||
}
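The parsers now fail fast on a malformed max-keys instead of silently defaulting it to 0. A small illustration (values are illustrative):

v := url.Values{}
v.Set("max-keys", "abc")
_, _, _, maxkeys, _, errCode := getListObjectsV1Args(v)
// Before this change: maxkeys silently became 0 and the request "succeeded".
// After: errCode == ErrInvalidMaxKeys, so the handler can reject the request.
_, _ = maxkeys, errCode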
|
||||
@@ -47,44 +54,69 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
|
||||
}
|
||||
}
|
||||
|
||||
if values.Get("max-keys") != "" {
|
||||
var err error
|
||||
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
|
||||
errCode = ErrInvalidMaxKeys
|
||||
return
|
||||
}
|
||||
} else {
|
||||
maxkeys = maxObjectList
|
||||
}
|
||||
|
||||
prefix = values.Get("prefix")
|
||||
token = values.Get("continuation-token")
|
||||
startAfter = values.Get("start-after")
|
||||
delimiter = values.Get("delimiter")
|
||||
if values.Get("max-keys") != "" {
|
||||
maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
|
||||
} else {
|
||||
maxkeys = maxObjectList
|
||||
}
|
||||
fetchOwner = values.Get("fetch-owner") == "true"
|
||||
encodingType = values.Get("encoding-type")
|
||||
return
|
||||
}
|
||||
|
||||
// Parse bucket url queries for ?uploads
|
||||
func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int, encodingType string) {
|
||||
func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int, encodingType string, errCode APIErrorCode) {
|
||||
errCode = ErrNone
|
||||
|
||||
if values.Get("max-uploads") != "" {
|
||||
var err error
|
||||
if maxUploads, err = strconv.Atoi(values.Get("max-uploads")); err != nil {
|
||||
errCode = ErrInvalidMaxUploads
|
||||
return
|
||||
}
|
||||
} else {
|
||||
maxUploads = maxUploadsList
|
||||
}
|
||||
|
||||
prefix = values.Get("prefix")
|
||||
keyMarker = values.Get("key-marker")
|
||||
uploadIDMarker = values.Get("upload-id-marker")
|
||||
delimiter = values.Get("delimiter")
|
||||
if values.Get("max-uploads") != "" {
|
||||
maxUploads, _ = strconv.Atoi(values.Get("max-uploads"))
|
||||
} else {
|
||||
maxUploads = maxUploadsList
|
||||
}
|
||||
encodingType = values.Get("encoding-type")
|
||||
return
|
||||
}
|
||||
|
||||
// Parse object url queries
|
||||
func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string) {
|
||||
uploadID = values.Get("uploadId")
|
||||
partNumberMarker, _ = strconv.Atoi(values.Get("part-number-marker"))
|
||||
func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string, errCode APIErrorCode) {
|
||||
var err error
|
||||
errCode = ErrNone
|
||||
|
||||
if values.Get("max-parts") != "" {
|
||||
maxParts, _ = strconv.Atoi(values.Get("max-parts"))
|
||||
if maxParts, err = strconv.Atoi(values.Get("max-parts")); err != nil {
|
||||
errCode = ErrInvalidMaxParts
|
||||
return
|
||||
}
|
||||
} else {
|
||||
maxParts = maxPartsList
|
||||
}
|
||||
|
||||
if values.Get("part-number-marker") != "" {
|
||||
if partNumberMarker, err = strconv.Atoi(values.Get("part-number-marker")); err != nil {
|
||||
errCode = ErrInvalidPartNumberMarker
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
uploadID = values.Get("uploadId")
|
||||
encodingType = values.Get("encoding-type")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -156,7 +156,10 @@ func TestListObjectsV1Resources(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
prefix, marker, delimiter, maxKeys, encodingType := getListObjectsV1Args(testCase.values)
|
||||
prefix, marker, delimiter, maxKeys, encodingType, argsErr := getListObjectsV1Args(testCase.values)
|
||||
if argsErr != ErrNone {
|
||||
t.Errorf("Test %d: argument parsing failed, got %v", i+1, argsErr)
|
||||
}
|
||||
if prefix != testCase.prefix {
|
||||
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.prefix, prefix)
|
||||
}
|
||||
@@ -198,7 +201,10 @@ func TestGetObjectsResources(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
uploadID, partNumberMarker, maxParts, encodingType := getObjectResources(testCase.values)
|
||||
uploadID, partNumberMarker, maxParts, encodingType, argsErr := getObjectResources(testCase.values)
|
||||
if argsErr != ErrNone {
|
||||
t.Errorf("Test %d: argument parsing failed, got %v", i+1, argsErr)
|
||||
}
|
||||
if uploadID != testCase.uploadID {
|
||||
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.uploadID, uploadID)
|
||||
}
|
||||
|
||||
@@ -16,7 +16,10 @@
|
||||
|
||||
package cmd
|
||||
|
||||
import "net/http"
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// Represents additional fields necessary for ErrPartTooSmall S3 error.
|
||||
type completeMultipartAPIError struct {
|
||||
@@ -42,9 +45,9 @@ type completeMultipartAPIError struct {
|
||||
// of this function.
|
||||
func writePartSmallErrorResponse(w http.ResponseWriter, r *http.Request, err PartTooSmall) {
|
||||
|
||||
apiError := getAPIError(toAPIErrorCode(err))
|
||||
apiError := getAPIError(toAPIErrorCode(context.Background(), err))
|
||||
// Generate complete multipart error response.
|
||||
errorResponse := getAPIErrorResponse(apiError, r.URL.Path)
|
||||
errorResponse := getAPIErrorResponse(apiError, r.URL.Path, w.Header().Get(responseRequestIDKey))
|
||||
cmpErrResp := completeMultipartAPIError{err.PartSize, int64(5242880), err.PartNumber, err.PartETag, errorResponse}
|
||||
encodedErrorResponse := encodeResponse(cmpErrResp)
|
||||
|
||||
|
||||
@@ -568,16 +568,25 @@ func writeSuccessResponseHeadersOnly(w http.ResponseWriter) {
|
||||
}
|
||||
|
||||
// writeErrorResponse writes the error headers and an XML-encoded error response
|
||||
func writeErrorResponse(w http.ResponseWriter, errorCode APIErrorCode, reqURL *url.URL) {
|
||||
func writeErrorResponse(w http.ResponseWriter, errorCode APIErrorCode, reqURL *url.URL, browser bool) {
|
||||
switch errorCode {
|
||||
case ErrSlowDown, ErrServerNotInitialized, ErrReadQuorum, ErrWriteQuorum:
|
||||
// Set retry-after header to indicate user-agents to retry request after 120secs.
|
||||
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
|
||||
w.Header().Set("Retry-After", "120")
|
||||
case ErrAccessDenied:
|
||||
// If the request came from a browser and the browser
// UI is enabled, we need to redirect to it instead.
|
||||
if browser && globalIsBrowserEnabled {
|
||||
w.Header().Set("Location", minioReservedBucketPath+reqURL.Path)
|
||||
w.WriteHeader(http.StatusTemporaryRedirect)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
apiError := getAPIError(errorCode)
|
||||
// Generate error response.
|
||||
errorResponse := getAPIErrorResponse(apiError, reqURL.Path)
|
||||
errorResponse := getAPIErrorResponse(apiError, reqURL.Path, w.Header().Get(responseRequestIDKey))
|
||||
encodedErrorResponse := encodeResponse(errorResponse)
|
||||
writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML)
|
||||
}
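For throttling-class errors the server now advertises a fixed 120-second backoff. A sketch of a client honoring the hint (assuming ErrSlowDown and friends map to a 503-style status; endpoint is illustrative):

resp, err := http.Get(endpoint)
if err == nil && resp.StatusCode == http.StatusServiceUnavailable {
	if secs, aerr := strconv.Atoi(resp.Header.Get("Retry-After")); aerr == nil {
		time.Sleep(time.Duration(secs) * time.Second) // retry after the advertised delay
	}
}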
|
||||
@@ -592,7 +601,7 @@ func writeErrorResponseHeadersOnly(w http.ResponseWriter, errorCode APIErrorCode
|
||||
func writeErrorResponseJSON(w http.ResponseWriter, errorCode APIErrorCode, reqURL *url.URL) {
|
||||
apiError := getAPIError(errorCode)
|
||||
// Generate error response.
|
||||
errorResponse := getAPIErrorResponse(apiError, reqURL.Path)
|
||||
errorResponse := getAPIErrorResponse(apiError, reqURL.Path, w.Header().Get(responseRequestIDKey))
|
||||
encodedErrorResponse := encodeResponseJSON(errorResponse)
|
||||
writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeJSON)
|
||||
}
|
||||
@@ -608,7 +617,7 @@ func writeCustomErrorResponseJSON(w http.ResponseWriter, errorCode APIErrorCode,
|
||||
Code: apiError.Code,
|
||||
Message: errBody,
|
||||
Resource: reqURL.Path,
|
||||
RequestID: "3L137",
|
||||
RequestID: w.Header().Get(responseRequestIDKey),
|
||||
HostID: "3L137",
|
||||
}
|
||||
encodedErrorResponse := encodeResponseJSON(errorResponse)
|
||||
|
||||
@@ -20,29 +20,25 @@ import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
)
|
||||
|
||||
// objectAPIHandlers implements and provides http handlers for S3 API.
|
||||
type objectAPIHandlers struct {
|
||||
ObjectAPI func() ObjectLayer
|
||||
CacheAPI func() CacheObjectLayer
|
||||
// Returns true if handlers should interpret encryption.
|
||||
EncryptionEnabled func() bool
|
||||
}
|
||||
|
||||
// registerAPIRouter - registers S3 compatible APIs.
|
||||
func registerAPIRouter(router *mux.Router) {
|
||||
var err error
|
||||
var cacheConfig = globalServerConfig.GetCacheConfig()
|
||||
if len(cacheConfig.Drives) > 0 {
|
||||
// initialize the new disk cache objects.
|
||||
globalCacheObjectAPI, err = newServerCacheObjects(cacheConfig)
|
||||
logger.FatalIf(err, "Unable to initialize disk caching")
|
||||
}
|
||||
|
||||
func registerAPIRouter(router *mux.Router, encryptionEnabled bool) {
|
||||
// Initialize API.
|
||||
api := objectAPIHandlers{
|
||||
ObjectAPI: newObjectLayerFn,
|
||||
CacheAPI: newCacheObjectsFn,
|
||||
EncryptionEnabled: func() bool {
|
||||
return encryptionEnabled
|
||||
},
|
||||
}
|
||||
|
||||
// API Router
|
||||
@@ -71,6 +67,8 @@ func registerAPIRouter(router *mux.Router) {
|
||||
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.AbortMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}")
|
||||
// GetObjectACL - this is a dummy call.
|
||||
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectACLHandler)).Queries("acl", "")
|
||||
// SelectObjectContent
|
||||
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.SelectObjectContentHandler)).Queries("select", "").Queries("select-type", "2")
|
||||
// GetObject
|
||||
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectHandler))
|
||||
// CopyObject
|
||||
@@ -108,7 +106,7 @@ func registerAPIRouter(router *mux.Router) {
|
||||
// HeadBucket
|
||||
bucket.Methods("HEAD").HandlerFunc(httpTraceAll(api.HeadBucketHandler))
|
||||
// PostPolicy
|
||||
bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(httpTraceAll(api.PostPolicyBucketHandler))
|
||||
bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(httpTraceHdrs(api.PostPolicyBucketHandler))
|
||||
// DeleteMultipleObjects
|
||||
bucket.Methods("POST").HandlerFunc(httpTraceAll(api.DeleteMultipleObjectsHandler)).Queries("delete", "")
|
||||
// DeleteBucketPolicy
|
||||
|
||||
@@ -19,6 +19,7 @@ package cmd
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/subtle"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
@@ -27,8 +28,11 @@ import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
jwtgo "github.com/dgrijalva/jwt-go"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/iam/policy"
|
||||
"github.com/minio/minio/pkg/policy"
|
||||
)
|
||||
|
||||
@@ -86,6 +90,7 @@ const (
|
||||
authTypeSigned
|
||||
authTypeSignedV2
|
||||
authTypeJWT
|
||||
authTypeSTS
|
||||
)
|
||||
|
||||
// Get request authentication type.
|
||||
@@ -104,6 +109,8 @@ func getRequestAuthType(r *http.Request) authType {
|
||||
return authTypeJWT
|
||||
} else if isRequestPostPolicySignatureV4(r) {
|
||||
return authTypePostPolicy
|
||||
} else if _, ok := r.URL.Query()["Action"]; ok {
|
||||
return authTypeSTS
|
||||
} else if _, ok := r.Header["Authorization"]; !ok {
|
||||
return authTypeAnonymous
|
||||
}
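authTypeSTS is detected purely from the presence of an Action query parameter, before any Authorization header inspection. For example (the request shape is illustrative):

r, _ := http.NewRequest("POST", "http://localhost:9000/?Action=AssumeRoleWithClientGrants", nil)
if getRequestAuthType(r) == authTypeSTS {
	// routed to the STS handler; no signature check happens at this layer
}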
|
||||
@@ -112,43 +119,140 @@ func getRequestAuthType(r *http.Request) authType {
|
||||
|
||||
// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
|
||||
// It does not accept presigned or JWT or anonymous requests.
|
||||
func checkAdminRequestAuthType(r *http.Request, region string) APIErrorCode {
|
||||
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, region string) APIErrorCode {
|
||||
s3Err := ErrAccessDenied
|
||||
if _, ok := r.Header["X-Amz-Content-Sha256"]; ok && getRequestAuthType(r) == authTypeSigned && !skipContentSha256Cksum(r) { // we only support V4 (no presign) with auth. body
|
||||
s3Err = isReqAuthenticated(r, region)
|
||||
if _, ok := r.Header["X-Amz-Content-Sha256"]; ok &&
|
||||
getRequestAuthType(r) == authTypeSigned && !skipContentSha256Cksum(r) {
|
||||
// We only support admin credentials to access admin APIs.
|
||||
|
||||
var owner bool
|
||||
_, owner, s3Err = getReqAccessKeyV4(r, region)
|
||||
if s3Err != ErrNone {
|
||||
return s3Err
|
||||
}
|
||||
|
||||
if !owner {
|
||||
return ErrAccessDenied
|
||||
}
|
||||
|
||||
// we only support V4 (no presign) with auth body
|
||||
s3Err = isReqAuthenticated(ctx, r, region)
|
||||
}
|
||||
if s3Err != ErrNone {
|
||||
reqInfo := (&logger.ReqInfo{}).AppendTags("requestHeaders", dumpRequest(r))
|
||||
ctx := logger.SetReqInfo(context.Background(), reqInfo)
|
||||
ctx := logger.SetReqInfo(ctx, reqInfo)
|
||||
logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description))
|
||||
}
|
||||
return s3Err
|
||||
}
|
||||
|
||||
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) APIErrorCode {
|
||||
isOwner := true
|
||||
accountName := globalServerConfig.GetCredential().AccessKey
|
||||
// Fetch the security token set by the client.
|
||||
func getSessionToken(r *http.Request) (token string) {
|
||||
token = r.Header.Get("X-Amz-Security-Token")
|
||||
if token != "" {
|
||||
return token
|
||||
}
|
||||
return r.URL.Query().Get("X-Amz-Security-Token")
|
||||
}
|
||||
|
||||
// Fetch claims in the security token returned by the client; doesn't return
// errors - upon an error the returned claims map will simply be empty.
|
||||
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
|
||||
claims, _ := getClaimsFromToken(r)
|
||||
return claims
|
||||
}
|
||||
|
||||
// Fetch claims in the security token returned by the client.
|
||||
func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
|
||||
claims := make(map[string]interface{})
|
||||
token := getSessionToken(r)
|
||||
if token == "" {
|
||||
return claims, nil
|
||||
}
|
||||
stsTokenCallback := func(jwtToken *jwtgo.Token) (interface{}, error) {
|
||||
// The JWT token for x-amz-security-token is signed with the admin
// secret key; temporary credentials become invalid whenever the
// server admin credentials change. This is done to ensure that
// clients cannot decode the token using the temporary secret keys
// and forge an entirely new claim, essentially hijacking the
// policies. Basing it on an admin credential ensures the token
// cannot be decoded on the client side and is treated as an
// opaque value.
|
||||
return []byte(globalServerConfig.GetCredential().SecretKey), nil
|
||||
}
|
||||
p := &jwtgo.Parser{
|
||||
ValidMethods: []string{
|
||||
jwtgo.SigningMethodHS256.Alg(),
|
||||
jwtgo.SigningMethodHS512.Alg(),
|
||||
},
|
||||
}
|
||||
jtoken, err := p.ParseWithClaims(token, jwtgo.MapClaims(claims), stsTokenCallback)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !jtoken.Valid {
|
||||
return nil, errAuthentication
|
||||
}
|
||||
v, ok := claims["accessKey"]
|
||||
if !ok {
|
||||
return nil, errInvalidAccessKeyID
|
||||
}
|
||||
if _, ok = v.(string); !ok {
|
||||
return nil, errInvalidAccessKeyID
|
||||
}
|
||||
return claims, nil
|
||||
}
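The inverse operation - minting a token that getClaimsFromToken will accept - makes the contract clear: HS256 or HS512, signed with the admin secret key, carrying at least an accessKey string claim. A sketch using the same jwt-go package (the exact claim set and lifetime here are assumptions):

func mintSessionToken(accessKey, adminSecretKey string) (string, error) {
	claims := jwtgo.MapClaims{
		"accessKey": accessKey,
		"exp":       time.Now().Add(time.Hour).Unix(), // lifetime is illustrative
	}
	token := jwtgo.NewWithClaims(jwtgo.SigningMethodHS256, claims)
	return token.SignedString([]byte(adminSecretKey))
}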
|
||||
|
||||
// Fetch claims in the security token returned by the client and validate the token.
|
||||
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
|
||||
token := getSessionToken(r)
|
||||
if token != "" && cred.AccessKey == "" {
|
||||
return nil, ErrNoAccessKey
|
||||
}
|
||||
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
|
||||
return nil, ErrInvalidToken
|
||||
}
|
||||
claims, err := getClaimsFromToken(r)
|
||||
if err != nil {
|
||||
return nil, toAPIErrorCode(context.Background(), err)
|
||||
}
|
||||
return claims, ErrNone
|
||||
}
|
||||
|
||||
// checkRequestAuthType verifies the incoming http request:
// - validates the request signature
// - for anonymous requests, validates the policy action against bucket policies, if any
// - for authenticated requests, validates IAM policies.
// Returns an APIErrorCode, if any, to be replied to the client.
|
||||
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
|
||||
var cred auth.Credentials
|
||||
var owner bool
|
||||
switch getRequestAuthType(r) {
|
||||
case authTypeUnknown:
|
||||
case authTypeUnknown, authTypeStreamingSigned:
|
||||
return ErrAccessDenied
|
||||
case authTypePresignedV2, authTypeSignedV2:
|
||||
if errorCode := isReqAuthenticatedV2(r); errorCode != ErrNone {
|
||||
return errorCode
|
||||
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
|
||||
return s3Err
|
||||
}
|
||||
cred, owner, s3Err = getReqAccessKeyV2(r)
|
||||
case authTypeSigned, authTypePresigned:
|
||||
region := globalServerConfig.GetRegion()
|
||||
switch action {
|
||||
case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
|
||||
region = ""
|
||||
}
|
||||
|
||||
if errorCode := isReqAuthenticated(r, region); errorCode != ErrNone {
|
||||
return errorCode
|
||||
if s3Err = isReqAuthenticated(ctx, r, region); s3Err != ErrNone {
|
||||
return s3Err
|
||||
}
|
||||
default:
|
||||
isOwner = false
|
||||
accountName = ""
|
||||
cred, owner, s3Err = getReqAccessKeyV4(r, region)
|
||||
}
|
||||
if s3Err != ErrNone {
|
||||
return s3Err
|
||||
}
|
||||
|
||||
claims, s3Err := checkClaimsFromToken(r, cred)
|
||||
if s3Err != ErrNone {
|
||||
return s3Err
|
||||
}
|
||||
|
||||
// LocationConstraint is valid only for CreateBucketAction.
|
||||
@@ -174,17 +278,31 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
|
||||
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
|
||||
}
|
||||
|
||||
if globalPolicySys.IsAllowed(policy.Args{
|
||||
AccountName: accountName,
|
||||
Action: action,
|
||||
if cred.AccessKey == "" {
|
||||
if globalPolicySys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: action,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, locationConstraint, ""),
|
||||
IsOwner: false,
|
||||
ObjectName: objectName,
|
||||
}) {
|
||||
return ErrNone
|
||||
}
|
||||
return ErrAccessDenied
|
||||
}
|
||||
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: iampolicy.Action(action),
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, locationConstraint),
|
||||
IsOwner: isOwner,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey),
|
||||
ObjectName: objectName,
|
||||
IsOwner: owner,
|
||||
Claims: claims,
|
||||
}) {
|
||||
return ErrNone
|
||||
}
|
||||
|
||||
return ErrAccessDenied
|
||||
}
|
||||
|
||||
@@ -209,7 +327,7 @@ func reqSignatureV4Verify(r *http.Request, region string) (s3Error APIErrorCode)
|
||||
}
|
||||
|
||||
// Verify if request has valid AWS Signature Version '4'.
|
||||
func isReqAuthenticated(r *http.Request, region string) (s3Error APIErrorCode) {
|
||||
func isReqAuthenticated(ctx context.Context, r *http.Request, region string) (s3Error APIErrorCode) {
|
||||
if errCode := reqSignatureV4Verify(r, region); errCode != ErrNone {
|
||||
return errCode
|
||||
}
|
||||
@@ -244,9 +362,9 @@ func isReqAuthenticated(r *http.Request, region string) (s3Error APIErrorCode) {
|
||||
|
||||
// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
|
||||
// The verification happens implicitly during reading.
|
||||
reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256))
|
||||
reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256), -1)
|
||||
if err != nil {
|
||||
return toAPIErrorCode(err)
|
||||
return toAPIErrorCode(ctx, err)
|
||||
}
|
||||
r.Body = ioutil.NopCloser(reader)
|
||||
return ErrNone
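hash.NewReader gained a trailing actualSize argument (-1 when unknown); the digest check still happens transparently as the wrapped body is consumed. A small illustration (the digest value and resulting error code are illustrative):

hr, _ := hash.NewReader(strings.NewReader("abc"), 3, "", strings.Repeat("0", 64), 3)
_, err := ioutil.ReadAll(hr) // digests are verified while reading
// err here reports a SHA-256 mismatch, which toAPIErrorCode maps to an
// S3 error code such as ErrContentSHA256Mismatch.
_ = err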
|
||||
@@ -288,12 +406,67 @@ func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
} else if aType == authTypeJWT {
|
||||
// Validate Authorization header if its valid for JWT request.
|
||||
if !isHTTPRequestValid(r) {
|
||||
if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
a.handler.ServeHTTP(w, r)
|
||||
return
|
||||
} else if aType == authTypeSTS {
|
||||
a.handler.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
writeErrorResponse(w, ErrSignatureVersionNotSupported, r.URL)
|
||||
writeErrorResponse(w, ErrSignatureVersionNotSupported, r.URL, guessIsBrowserReq(r))
|
||||
}
|
||||
|
||||
// isPutAllowed - checks whether a PUT operation is allowed on the resource; this
// call verifies bucket policies and IAM policies, supports multi-user
// checks, etc.
|
||||
func isPutAllowed(atype authType, bucketName, objectName string, r *http.Request) (s3Err APIErrorCode) {
|
||||
var cred auth.Credentials
|
||||
var owner bool
|
||||
switch atype {
|
||||
case authTypeUnknown:
|
||||
return ErrAccessDenied
|
||||
case authTypeSignedV2, authTypePresignedV2:
|
||||
cred, owner, s3Err = getReqAccessKeyV2(r)
|
||||
case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
|
||||
region := globalServerConfig.GetRegion()
|
||||
cred, owner, s3Err = getReqAccessKeyV4(r, region)
|
||||
}
|
||||
if s3Err != ErrNone {
|
||||
return s3Err
|
||||
}
|
||||
|
||||
claims, s3Err := checkClaimsFromToken(r, cred)
|
||||
if s3Err != ErrNone {
|
||||
return s3Err
|
||||
}
|
||||
|
||||
if cred.AccessKey == "" {
|
||||
if globalPolicySys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: policy.PutObjectAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, "", ""),
|
||||
IsOwner: false,
|
||||
ObjectName: objectName,
|
||||
}) {
|
||||
return ErrNone
|
||||
}
|
||||
return ErrAccessDenied
|
||||
}
|
||||
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: policy.PutObjectAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey),
|
||||
ObjectName: objectName,
|
||||
IsOwner: owner,
|
||||
Claims: claims,
|
||||
}) {
|
||||
return ErrNone
|
||||
}
|
||||
return ErrAccessDenied
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
@@ -344,11 +345,14 @@ func mustNewSignedBadMD5Request(method string, urlStr string, contentLength int6
|
||||
|
||||
// Tests the isReqAuthenticated function; checks the replies for S3 errors.
|
||||
func TestIsReqAuthenticated(t *testing.T) {
|
||||
path, err := newTestConfig(globalMinioDefaultRegion)
|
||||
objLayer, fsDir, err := prepareFS()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(fsDir)
|
||||
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
|
||||
t.Fatalf("unable initialize config file, %s", err)
|
||||
}
|
||||
defer os.RemoveAll(path)
|
||||
|
||||
creds, err := auth.CreateCredentials("myuser", "mypassword")
|
||||
if err != nil {
|
||||
@@ -374,21 +378,26 @@ func TestIsReqAuthenticated(t *testing.T) {
|
||||
{mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
// Validates all testcases.
|
||||
for i, testCase := range testCases {
|
||||
if s3Error := isReqAuthenticated(testCase.req, globalServerConfig.GetRegion()); s3Error != testCase.s3Error {
|
||||
if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(err) != testCase.s3Error {
|
||||
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %d)", i, testCase.s3Error, s3Error, toAPIErrorCode(err))
|
||||
if s3Error := isReqAuthenticated(ctx, testCase.req, globalServerConfig.GetRegion()); s3Error != testCase.s3Error {
|
||||
if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
|
||||
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %d)", i, testCase.s3Error, s3Error, toAPIErrorCode(ctx, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
func TestCheckAdminRequestAuthType(t *testing.T) {
|
||||
path, err := newTestConfig(globalMinioDefaultRegion)
|
||||
objLayer, fsDir, err := prepareFS()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(fsDir)
|
||||
|
||||
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
|
||||
t.Fatalf("unable initialize config file, %s", err)
|
||||
}
|
||||
defer os.RemoveAll(path)
|
||||
|
||||
creds, err := auth.CreateCredentials("myuser", "mypassword")
|
||||
if err != nil {
|
||||
@@ -406,8 +415,9 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
|
||||
{Request: mustNewPresignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
|
||||
{Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
|
||||
}
|
||||
ctx := context.Background()
|
||||
for i, testCase := range testCases {
|
||||
if s3Error := checkAdminRequestAuthType(testCase.Request, globalServerConfig.GetRegion()); s3Error != testCase.ErrCode {
|
||||
if s3Error := checkAdminRequestAuthType(ctx, testCase.Request, globalServerConfig.GetRegion()); s3Error != testCase.ErrCode {
|
||||
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@ import (
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"math/rand"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
@@ -62,7 +61,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
// insert the object.
|
||||
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
|
||||
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
|
||||
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -98,7 +97,8 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
|
||||
// generate md5sum for the generated data.
|
||||
// md5sum of the data to written is required as input for NewMultipartUpload.
|
||||
metadata := make(map[string]string)
|
||||
uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, metadata)
|
||||
opts := ObjectOptions{}
|
||||
uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, metadata, opts)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -123,7 +123,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
|
||||
md5hex = getMD5Hash([]byte(textPartData))
|
||||
var partInfo PartInfo
|
||||
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
|
||||
mustGetHashReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex))
|
||||
mustGetPutObjReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), opts)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -138,12 +138,6 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
|
||||
|
||||
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
|
||||
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
|
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
b.Fatalf("Unable to initialize config. %s", err)
|
||||
}
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
// create a temp XL/FS backend.
|
||||
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
|
||||
if err != nil {
|
||||
@@ -151,18 +145,13 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
|
||||
}
|
||||
// cleaning up the backend by removing all the directories and files created on function return.
|
||||
defer removeRoots(disks)
|
||||
|
||||
// uses *testing.B and the object Layer to run the benchmark.
|
||||
runPutObjectPartBenchmark(b, objLayer, objSize)
|
||||
}
|
||||
|
||||
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
|
||||
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
|
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
b.Fatalf("Unable to initialize config. %s", err)
|
||||
}
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
// create a temp XL/FS backend.
|
||||
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
|
||||
if err != nil {
|
||||
@@ -170,18 +159,13 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
|
||||
}
|
||||
// cleaning up the backend by removing all the directories and files created on function return.
|
||||
defer removeRoots(disks)
|
||||
|
||||
// uses *testing.B and the object Layer to run the benchmark.
|
||||
runPutObjectBenchmark(b, objLayer, objSize)
|
||||
}
|
||||
|
||||
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
|
||||
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
|
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
b.Fatalf("Unable to initialize config. %s", err)
|
||||
}
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
// create a temp XL/FS backend.
|
||||
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
|
||||
if err != nil {
|
||||
@@ -189,6 +173,7 @@ func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int)
|
||||
}
|
||||
// cleaning up the backend by removing all the directories and files created on function return.
|
||||
defer removeRoots(disks)
|
||||
|
||||
// uses *testing.B and the object Layer to run the benchmark.
|
||||
runPutObjectBenchmarkParallel(b, objLayer, objSize)
|
||||
}
|
||||
@@ -196,16 +181,10 @@ func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int)
|
||||
// Benchmark utility functions for ObjectLayer.GetObject().
|
||||
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
|
||||
func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
b.Fatalf("Unable to initialize config. %s", err)
|
||||
}
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
// obtains random bucket name.
|
||||
bucket := getRandomBucketName()
|
||||
// create bucket.
|
||||
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
|
||||
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -225,7 +204,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
// insert the object.
|
||||
var objInfo ObjectInfo
|
||||
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
|
||||
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
|
||||
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -240,7 +219,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
var buffer = new(bytes.Buffer)
|
||||
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "")
|
||||
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "", ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Error(err)
|
||||
}
|
||||
@@ -269,12 +248,6 @@ func generateBytesData(size int) []byte {
|
||||
|
||||
// creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
|
||||
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
|
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
b.Fatalf("Unable to initialize config. %s", err)
|
||||
}
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
// create a temp XL/FS backend.
|
||||
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
|
||||
if err != nil {
|
||||
@@ -282,18 +255,13 @@ func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
|
||||
}
|
||||
// cleaning up the backend by removing all the directories and files created.
|
||||
defer removeRoots(disks)
|
||||
|
||||
// uses *testing.B and the object Layer to run the benchmark.
|
||||
runGetObjectBenchmark(b, objLayer, objSize)
|
||||
}
|
||||
|
||||
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
|
||||
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
|
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
b.Fatalf("Unable to initialize config. %s", err)
|
||||
}
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
// create a temp XL/FS backend.
|
||||
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
|
||||
if err != nil {
|
||||
@@ -301,6 +269,7 @@ func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int)
|
||||
}
|
||||
// cleaning up the backend by removing all the directories and files created.
|
||||
defer removeRoots(disks)
|
||||
|
||||
// uses *testing.B and the object Layer to run the benchmark.
|
||||
runGetObjectBenchmarkParallel(b, objLayer, objSize)
|
||||
}
|
||||
@@ -308,16 +277,10 @@ func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int)
|
||||
// Parallel benchmark utility functions for ObjectLayer.PutObject().
|
||||
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
|
||||
func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
b.Fatalf("Unable to initialize config. %s", err)
|
||||
}
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
// obtains random bucket name.
|
||||
bucket := getRandomBucketName()
|
||||
// create bucket.
|
||||
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
|
||||
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -341,7 +304,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
for pb.Next() {
|
||||
// insert the object.
|
||||
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
|
||||
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
|
||||
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -359,16 +322,10 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
// Parallel benchmark utility functions for ObjectLayer.GetObject().
|
||||
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
|
||||
func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
b.Fatalf("Unable to initialize config. %s", err)
|
||||
}
|
||||
defer os.RemoveAll(rootPath)
|
||||
|
||||
// obtains random bucket name.
|
||||
bucket := getRandomBucketName()
|
||||
// create bucket.
|
||||
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
|
||||
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -387,7 +344,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
// insert the object.
|
||||
var objInfo ObjectInfo
|
||||
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
|
||||
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
|
||||
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -403,7 +360,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
i := 0
|
||||
for pb.Next() {
|
||||
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "")
|
||||
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "", ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Error(err)
|
||||
}
|
||||
|
||||
172
cmd/bitrot-streaming.go
Normal file
@@ -0,0 +1,172 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2019 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"hash"
|
||||
"io"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
)
|
||||
|
||||
// Calculates bitrot in chunks and writes the hash into the stream.
|
||||
type streamingBitrotWriter struct {
|
||||
iow *io.PipeWriter
|
||||
h hash.Hash
|
||||
shardSize int64
|
||||
canClose chan struct{} // Needed to avoid race explained in Close() call.
|
||||
|
||||
// The following two fields are used only to make sure that Write(p) is called such that
// len(p) is always the block size, except for the last block, i.e. they prevent programmer errors.
|
||||
currentBlockIdx int
|
||||
verifyTillIdx int
|
||||
}
|
||||
|
||||
func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
|
||||
if b.currentBlockIdx < b.verifyTillIdx && int64(len(p)) != b.shardSize {
|
||||
// All blocks except last should be of the length b.shardSize
|
||||
logger.LogIf(context.Background(), errUnexpected)
|
||||
return 0, errUnexpected
|
||||
}
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
b.h.Reset()
|
||||
b.h.Write(p)
|
||||
hashBytes := b.h.Sum(nil)
|
||||
n, err := b.iow.Write(hashBytes)
|
||||
if n != len(hashBytes) {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return 0, err
|
||||
}
|
||||
n, err = b.iow.Write(p)
|
||||
b.currentBlockIdx++
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (b *streamingBitrotWriter) Close() error {
|
||||
err := b.iow.Close()
|
||||
// Wait for all data to be written before returning, else it causes race conditions.
// The race condition stems from the io.PipeWriter implementation; consider the following
// sequence of operations:
// 1) pipe.Write()
// 2) pipe.Close()
// Now pipe.Close() can return before the data is read on the other end of the pipe and written to the disk.
// Hence an immediate Read() on the file can return incorrect data.
|
||||
<-b.canClose
|
||||
return err
|
||||
}
|
||||
|
||||
// Returns streaming bitrot writer implementation.
|
||||
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
|
||||
r, w := io.Pipe()
|
||||
h := algo.New()
|
||||
bw := &streamingBitrotWriter{w, h, shardSize, make(chan struct{}), 0, int(length / shardSize)}
|
||||
go func() {
|
||||
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
|
||||
totalFileSize := bitrotSumsTotalSize + length
|
||||
err := disk.CreateFile(volume, filePath, totalFileSize, r)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
r.CloseWithError(err)
|
||||
}
|
||||
close(bw.canClose)
|
||||
}()
|
||||
return bw
|
||||
}
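The stream this writer persists interleaves a checksum before every shard - [hash1][block1][hash2][block2]... - so the file created up front must be sized for the payload plus one hash per shard, which is what the ceilFrac arithmetic computes. The size math in isolation (assuming ceiling-division semantics for ceilFrac):

// E.g. a 35-byte object with shardSize 10 and a 32-byte hash occupies
// ceil(35/10)*32 + 35 = 4*32 + 35 = 163 bytes on disk.
func streamSize(length, shardSize, hashSize int64) int64 {
	shards := (length + shardSize - 1) / shardSize // ceilFrac equivalent
	return shards*hashSize + length
}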
|
||||
|
||||
// ReadAt() implementation which verifies the bitrot hash available as part of the stream.
|
||||
type streamingBitrotReader struct {
|
||||
disk StorageAPI
|
||||
rc io.ReadCloser
|
||||
volume string
|
||||
filePath string
|
||||
tillOffset int64
|
||||
currOffset int64
|
||||
h hash.Hash
|
||||
shardSize int64
|
||||
hashBytes []byte
|
||||
}
|
||||
|
||||
func (b *streamingBitrotReader) Close() error {
|
||||
if b.rc == nil {
|
||||
return nil
|
||||
}
|
||||
return b.rc.Close()
|
||||
}
|
||||
|
||||
func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
|
||||
var err error
|
||||
if offset%b.shardSize != 0 {
|
||||
// Offset should always be aligned to b.shardSize
|
||||
logger.LogIf(context.Background(), errUnexpected)
|
||||
return 0, errUnexpected
|
||||
}
|
||||
if b.rc == nil {
|
||||
// For the first ReadAt() call we need to open the stream for reading.
|
||||
b.currOffset = offset
|
||||
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
|
||||
b.rc, err = b.disk.ReadFileStream(b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
if offset != b.currOffset {
|
||||
logger.LogIf(context.Background(), errUnexpected)
|
||||
return 0, errUnexpected
|
||||
}
|
||||
b.h.Reset()
|
||||
_, err = io.ReadFull(b.rc, b.hashBytes)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return 0, err
|
||||
}
|
||||
_, err = io.ReadFull(b.rc, buf)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return 0, err
|
||||
}
|
||||
b.h.Write(buf)
|
||||
|
||||
if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
|
||||
err = hashMismatchError{hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))}
|
||||
logger.LogIf(context.Background(), err)
|
||||
return 0, err
|
||||
}
|
||||
b.currOffset += int64(len(buf))
|
||||
return len(buf), nil
|
||||
}
|
||||
|
||||
// Returns streaming bitrot reader implementation.
|
||||
func newStreamingBitrotReader(disk StorageAPI, volume, filePath string, tillOffset int64, algo BitrotAlgorithm, shardSize int64) *streamingBitrotReader {
|
||||
h := algo.New()
|
||||
return &streamingBitrotReader{
|
||||
disk,
|
||||
nil,
|
||||
volume,
|
||||
filePath,
|
||||
ceilFrac(tillOffset, shardSize)*int64(h.Size()) + tillOffset,
|
||||
0,
|
||||
h,
|
||||
shardSize,
|
||||
make([]byte, h.Size()),
|
||||
}
|
||||
}
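ReadAt translates a shard-aligned logical offset into a physical stream offset by skipping one hash per preceding shard - the same formula used for streamOffset in ReadAt above, isolated here:

// For shardSize 10 and a 32-byte hash, logical offset 20 maps to
// physical offset (20/10)*32 + 20 = 84.
func streamOffset(offset, shardSize, hashSize int64) int64 {
	return (offset/shardSize)*hashSize + offset
}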
|
||||
109
cmd/bitrot-whole.go
Normal file
@@ -0,0 +1,109 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2019 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"hash"
|
||||
"io"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
)
|
||||
|
||||
// Implementation to calculate bitrot for the whole file.
|
||||
type wholeBitrotWriter struct {
|
||||
disk StorageAPI
|
||||
volume string
|
||||
filePath string
|
||||
shardSize int64 // This is the shard size of the erasure logic
|
||||
hash.Hash // For bitrot hash
|
||||
|
||||
// The following two fields are used only to make sure that Write(p) is called such that
// len(p) is always the block size, except for the last block, to prevent programmer errors.
|
||||
currentBlockIdx int
|
||||
lastBlockIdx int
|
||||
}
|
||||
|
||||
func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
|
||||
if b.currentBlockIdx < b.lastBlockIdx && int64(len(p)) != b.shardSize {
|
||||
// All blocks except last should be of the length b.shardSize
|
||||
logger.LogIf(context.Background(), errUnexpected)
|
||||
return 0, errUnexpected
|
||||
}
|
||||
err := b.disk.AppendFile(b.volume, b.filePath, p)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return 0, err
|
||||
}
|
||||
_, err = b.Hash.Write(p)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return 0, err
|
||||
}
|
||||
b.currentBlockIdx++
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (b *wholeBitrotWriter) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns whole-file bitrot writer.
|
||||
func newWholeBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
|
||||
return &wholeBitrotWriter{disk, volume, filePath, shardSize, algo.New(), 0, int(length / shardSize)}
|
||||
}
|
||||
|
||||
// Implementation to verify bitrot for the whole file.
|
||||
type wholeBitrotReader struct {
|
||||
disk StorageAPI
|
||||
volume string
|
||||
filePath string
|
||||
verifier *BitrotVerifier // Holds the bit-rot info
|
||||
tillOffset int64 // Affects the length of data requested in disk.ReadFile depending on Read()'s offset
|
||||
buf []byte // Holds bit-rot verified data
|
||||
}
|
||||
|
||||
func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error) {
|
||||
if b.buf == nil {
|
||||
b.buf = make([]byte, b.tillOffset-offset)
|
||||
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
|
||||
ctx := context.Background()
|
||||
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
|
||||
logger.LogIf(ctx, err)
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
if len(b.buf) < len(buf) {
|
||||
logger.LogIf(context.Background(), errLessData)
|
||||
return 0, errLessData
|
||||
}
|
||||
n = copy(buf, b.buf)
|
||||
b.buf = b.buf[n:]
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Returns whole-file bitrot reader.
|
||||
func newWholeBitrotReader(disk StorageAPI, volume, filePath string, algo BitrotAlgorithm, tillOffset int64, sum []byte) *wholeBitrotReader {
|
||||
return &wholeBitrotReader{
|
||||
disk: disk,
|
||||
volume: volume,
|
||||
filePath: filePath,
|
||||
verifier: &BitrotVerifier{algo, sum},
|
||||
tillOffset: tillOffset,
|
||||
buf: nil,
|
||||
}
|
||||
}
|
||||
187
cmd/bitrot.go
Normal file
@@ -0,0 +1,187 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"hash"
|
||||
"io"
|
||||
|
||||
"github.com/minio/highwayhash"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
sha256 "github.com/minio/sha256-simd"
|
||||
"golang.org/x/crypto/blake2b"
|
||||
)
|
||||
|
||||
// Magic HH-256 key: the HH-256 hash of the first 100 decimals of π (as a UTF-8 string), computed with a zero key.
|
||||
var magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0")
|
||||
|
||||
// BitrotAlgorithm specifies an algorithm used for bitrot protection.
|
||||
type BitrotAlgorithm uint
|
||||
|
||||
const (
|
||||
// SHA256 represents the SHA-256 hash function
|
||||
SHA256 BitrotAlgorithm = 1 + iota
|
||||
// HighwayHash256 represents the HighwayHash-256 hash function
|
||||
HighwayHash256
|
||||
// HighwayHash256S represents the Streaming HighwayHash-256 hash function
|
||||
HighwayHash256S
|
||||
// BLAKE2b512 represents the BLAKE2b-512 hash function
|
||||
BLAKE2b512
|
||||
)
|
||||
|
||||
// DefaultBitrotAlgorithm is the default algorithm used for bitrot protection.
|
||||
const (
|
||||
DefaultBitrotAlgorithm = HighwayHash256S
|
||||
)
|
||||
|
||||
var bitrotAlgorithms = map[BitrotAlgorithm]string{
|
||||
SHA256: "sha256",
|
||||
BLAKE2b512: "blake2b",
|
||||
HighwayHash256: "highwayhash256",
|
||||
HighwayHash256S: "highwayhash256S",
|
||||
}
|
||||
|
||||
// New returns a new hash.Hash calculating the given bitrot algorithm.
|
||||
func (a BitrotAlgorithm) New() hash.Hash {
|
||||
switch a {
|
||||
case SHA256:
|
||||
return sha256.New()
|
||||
case BLAKE2b512:
|
||||
b2, _ := blake2b.New512(nil) // New512 never returns an error if the key is nil
|
||||
return b2
|
||||
case HighwayHash256:
|
||||
hh, _ := highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
|
||||
return hh
|
||||
case HighwayHash256S:
|
||||
hh, _ := highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
|
||||
return hh
|
||||
default:
|
||||
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Available reports whether the given algorithm is available.
|
||||
func (a BitrotAlgorithm) Available() bool {
|
||||
_, ok := bitrotAlgorithms[a]
|
||||
return ok
|
||||
}
|
||||
|
||||
// String returns the string identifier for a given bitrot algorithm.
|
||||
// If the algorithm is not supported String panics.
|
||||
func (a BitrotAlgorithm) String() string {
|
||||
name, ok := bitrotAlgorithms[a]
|
||||
if !ok {
|
||||
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
// NewBitrotVerifier returns a new BitrotVerifier implementing the given algorithm.
|
||||
func NewBitrotVerifier(algorithm BitrotAlgorithm, checksum []byte) *BitrotVerifier {
|
||||
return &BitrotVerifier{algorithm, checksum}
|
||||
}
|
||||
|
||||
// BitrotVerifier can be used to verify protected data.
|
||||
type BitrotVerifier struct {
|
||||
algorithm BitrotAlgorithm
|
||||
sum []byte
|
||||
}
|
||||
|
||||
// BitrotAlgorithmFromString returns a bitrot algorithm from the given string representation.
|
||||
// It returns 0 if the string representation does not match any supported algorithm.
|
||||
// The zero value of a bitrot algorithm is never supported.
|
||||
func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
|
||||
for alg, name := range bitrotAlgorithms {
|
||||
if name == s {
|
||||
return alg
|
||||
}
|
||||
}
|
||||
return
|
||||
}
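Round-tripping through the string form is how the chosen algorithm is persisted in metadata; unknown names deliberately yield the unsupported zero value. For example:

algo := BitrotAlgorithmFromString("highwayhash256S")
fmt.Println(algo == HighwayHash256S, algo.Available()) // true true
fmt.Println(BitrotAlgorithmFromString("md5").Available()) // false - the zero value is never supported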
|
||||
|
||||
func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
|
||||
if algo == HighwayHash256S {
|
||||
return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize)
|
||||
}
|
||||
return newWholeBitrotWriter(disk, volume, filePath, length, algo, shardSize)
|
||||
}
|
||||
|
||||
func newBitrotReader(disk StorageAPI, bucket string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) io.ReaderAt {
|
||||
if algo == HighwayHash256S {
|
||||
return newStreamingBitrotReader(disk, bucket, filePath, tillOffset, algo, shardSize)
|
||||
}
|
||||
return newWholeBitrotReader(disk, bucket, filePath, algo, tillOffset, sum)
|
||||
}
|
||||
|
||||
// Close all the readers.
|
||||
func closeBitrotReaders(rs []io.ReaderAt) {
|
||||
for _, r := range rs {
|
||||
if br, ok := r.(*streamingBitrotReader); ok {
|
||||
br.Close()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Close all the writers.
|
||||
func closeBitrotWriters(ws []io.Writer) {
|
||||
for _, w := range ws {
|
||||
if bw, ok := w.(*streamingBitrotWriter); ok {
|
||||
bw.Close()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Returns hash sum for whole-bitrot, nil for streaming-bitrot.
|
||||
func bitrotWriterSum(w io.Writer) []byte {
|
||||
if bw, ok := w.(*wholeBitrotWriter); ok {
|
||||
return bw.Sum(nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Verify whether a file has a bitrot error.
|
||||
func bitrotCheckFile(disk StorageAPI, volume string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) (err error) {
|
||||
if algo != HighwayHash256S {
|
||||
buf := []byte{}
|
||||
// For whole-file bitrot we don't need to read the entire file, as the bitrot verification happens on the server side even if we read 0 bytes.
|
||||
_, err = disk.ReadFile(volume, filePath, 0, buf, NewBitrotVerifier(algo, sum))
|
||||
return err
|
||||
}
|
||||
buf := make([]byte, shardSize)
|
||||
r := newStreamingBitrotReader(disk, volume, filePath, tillOffset, algo, shardSize)
|
||||
defer closeBitrotReaders([]io.ReaderAt{r})
|
||||
var offset int64
|
||||
for {
|
||||
if offset == tillOffset {
|
||||
break
|
||||
}
|
||||
var n int
|
||||
tmpBuf := buf
|
||||
if int64(len(tmpBuf)) > (tillOffset - offset) {
|
||||
tmpBuf = tmpBuf[:(tillOffset - offset)]
|
||||
}
|
||||
n, err = r.ReadAt(tmpBuf, offset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
offset += int64(n)
|
||||
}
|
||||
return nil
|
||||
}
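
Taken together, the helpers above cover the whole shard lifecycle: pick a writer by algorithm, feed it shard-sized chunks, keep the whole-file checksum where one exists (the streaming algorithm stores per-shard sums inline, so bitrotWriterSum returns nil), and re-verify later with bitrotCheckFile. A minimal sketch of that flow, assuming this cmd package and an already-created volume; writeAndVerifyShard is a hypothetical helper, not part of the diff:

// writeAndVerifyShard is a hypothetical sketch built only from the helpers
// above: write data in shardSize chunks, then re-verify it for bitrot.
func writeAndVerifyShard(disk StorageAPI, volume, filePath string, data []byte, algo BitrotAlgorithm, shardSize int64) error {
    w := newBitrotWriter(disk, volume, filePath, int64(len(data)), algo, shardSize)
    for off := 0; off < len(data); off += int(shardSize) {
        end := off + int(shardSize)
        if end > len(data) {
            end = len(data)
        }
        if _, err := w.Write(data[off:end]); err != nil {
            return err
        }
    }
    closeBitrotWriters([]io.Writer{w}) // flushes the streaming writer; no-op for the whole-file writer
    // For whole-file bitrot this is the checksum to keep; for the
    // streaming algorithm it is nil, since sums live next to each shard.
    sum := bitrotWriterSum(w)
    return bitrotCheckFile(disk, volume, filePath, int64(len(data)), algo, sum, shardSize)
}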

cmd/bitrot_test.go (new file, 84 lines)
@@ -0,0 +1,84 @@
/*
 * Minio Cloud Storage, (C) 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
    "io"
    "io/ioutil"
    "os"
    "testing"
)

func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
    tmpDir, err := ioutil.TempDir("", "")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(tmpDir)

    volume := "testvol"
    filePath := "testfile"

    disk, err := newPosix(tmpDir)
    if err != nil {
        t.Fatal(err)
    }

    disk.MakeVol(volume)

    writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)

    _, err = writer.Write([]byte("aaaaaaaaaa"))
    if err != nil {
        t.Fatal(err)
    }
    _, err = writer.Write([]byte("aaaaaaaaaa"))
    if err != nil {
        t.Fatal(err)
    }
    _, err = writer.Write([]byte("aaaaaaaaaa"))
    if err != nil {
        t.Fatal(err)
    }
    _, err = writer.Write([]byte("aaaaa"))
    if err != nil {
        t.Fatal(err)
    }
    writer.(io.Closer).Close()

    reader := newBitrotReader(disk, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
    b := make([]byte, 10)
    if _, err = reader.ReadAt(b, 0); err != nil {
        t.Fatal(err)
    }
    if _, err = reader.ReadAt(b, 10); err != nil {
        t.Fatal(err)
    }
    if _, err = reader.ReadAt(b, 20); err != nil {
        t.Fatal(err)
    }
    if _, err = reader.ReadAt(b[:5], 30); err != nil {
        t.Fatal(err)
    }
}

func TestAllBitrotAlgorithms(t *testing.T) {
    for bitrotAlgo := range bitrotAlgorithms {
        testBitrotReaderWriterAlgo(t, bitrotAlgo)
    }
}

@@ -56,11 +56,12 @@ func (bf *BoolFlag) UnmarshalJSON(data []byte) (err error) {

// ParseBoolFlag - parses string into BoolFlag.
func ParseBoolFlag(s string) (bf BoolFlag, err error) {
    if s == "on" {
    switch s {
    case "on":
        bf = true
    } else if s == "off" {
    case "off":
        bf = false
    } else {
    default:
        err = fmt.Errorf("invalid value ‘%s’ for BoolFlag", s)
    }

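The hunk above rewrites ParseBoolFlag from an if/else chain into a switch over the input. A quick usage sketch, assuming the surrounding package and its "fmt" import; BoolFlag is taken to be a bool-based type, per the bf = true assignment above:

func exampleParseBoolFlag() {
    for _, s := range []string{"on", "off", "maybe"} {
        bf, err := ParseBoolFlag(s)
        if err != nil {
            // "maybe" falls through to the default branch above.
            fmt.Println(s, "->", err)
            continue
        }
        fmt.Println(s, "->", bool(bf))
    }
}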
@@ -20,6 +20,9 @@ import (
    "net/http"

    "github.com/gorilla/mux"
    "github.com/minio/minio/cmd/crypto"
    "github.com/minio/minio/cmd/logger"

    "github.com/minio/minio/pkg/policy"
)

@@ -54,19 +57,21 @@ func validateListObjectsArgs(prefix, marker, delimiter string, maxKeys int) APIE
// NOTE: It is recommended that this API be used for application development.
// Minio continues to support ListObjectsV1 for supporting legacy tools.
func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, "ListObjectsV2")
    ctx := newContext(r, w, "ListObjectsV2")

    defer logger.AuditLog(w, r, "ListObjectsV2", mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(w, ErrServerNotInitialized, r.URL)
        writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
        return
    }

    if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
        writeErrorResponse(w, s3Error, r.URL)
        writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
        return
    }

@@ -76,22 +81,14 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
    prefix, token, startAfter, delimiter, fetchOwner, maxKeys, _, errCode := getListObjectsV2Args(urlValues)

    if errCode != ErrNone {
        writeErrorResponse(w, errCode, r.URL)
        writeErrorResponse(w, errCode, r.URL, guessIsBrowserReq(r))
        return
    }

    // In ListObjectsV2 'continuation-token' is the marker.
    marker := token
    // Check if 'continuation-token' is empty.
    if token == "" {
        // Then we need to use 'start-after' as marker instead.
        marker = startAfter
    }

    // Validate the query params before beginning to serve the request.
    // fetch-owner is not validated since it is a boolean
    if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone {
        writeErrorResponse(w, s3Error, r.URL)
    if s3Error := validateListObjectsArgs(prefix, token, delimiter, maxKeys); s3Error != ErrNone {
        writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
        return
    }
    listObjectsV2 := objectAPI.ListObjectsV2
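The removed block above was the V2 marker fallback, which the handler now leaves to the backend by passing token through directly: the continuation-token acts as the marker, and start-after only applies when no token was sent. As a standalone sketch (resolveV2Marker is a hypothetical name, not in the diff):

// resolveV2Marker mirrors the removed fallback logic.
func resolveV2Marker(token, startAfter string) string {
    if token != "" {
        return token
    }
    return startAfter
}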
@@ -101,17 +98,28 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
    // Initiate a list objects operation based on the input params.
    // On success it returns a ListObjectsInfo object to be
    // marshaled into an S3-compatible XML header.
    listObjectsV2Info, err := listObjectsV2(ctx, bucket, prefix, marker, delimiter, maxKeys, fetchOwner, startAfter)
    listObjectsV2Info, err := listObjectsV2(ctx, bucket, prefix, token, delimiter, maxKeys, fetchOwner, startAfter)
    if err != nil {
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

    for i := range listObjectsV2Info.Objects {
        if listObjectsV2Info.Objects[i].IsEncrypted() {
            var actualSize int64
            if listObjectsV2Info.Objects[i].IsCompressed() {
                // Read the decompressed size from the meta.json.
                actualSize = listObjectsV2Info.Objects[i].GetActualSize()
                if actualSize < 0 {
                    writeErrorResponse(w, ErrInvalidDecompressedSize, r.URL, guessIsBrowserReq(r))
                    return
                }
                // Set the info.Size to the actualSize.
                listObjectsV2Info.Objects[i].Size = actualSize
            } else if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
                listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
                listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].DecryptedSize()
                if err != nil {
                    writeErrorResponse(w, toAPIErrorCode(err), r.URL)
                    writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
                    return
                }
            }
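The loop above normalizes each listed object's reported size: compressed objects get the recorded decompressed size from the metadata, and SSE objects get their decrypted size and ETag. A condensed, slightly reordered sketch of the same branch (displaySize is hypothetical; the ObjectInfo methods are assumed as used in the diff, and "errors" comes from the package imports):

func displaySize(oi ObjectInfo) (int64, error) {
    if oi.IsCompressed() {
        // Decompressed size recorded in the object metadata.
        if actual := oi.GetActualSize(); actual >= 0 {
            return actual, nil
        }
        return 0, errors.New("invalid decompressed size")
    }
    if crypto.IsEncrypted(oi.UserDefined) {
        return oi.DecryptedSize()
    }
    return oi.Size, nil
}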
@@ -131,33 +139,39 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
// criteria to return a subset of the objects in a bucket.
//
func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, "ListObjectsV1")
    ctx := newContext(r, w, "ListObjectsV1")

    defer logger.AuditLog(w, r, "ListObjectsV1", mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(w, ErrServerNotInitialized, r.URL)
        writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
        return
    }

    if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
        writeErrorResponse(w, s3Error, r.URL)
        writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
        return
    }

    // Extract all the listObjectsV1 query params to their native values.
    prefix, marker, delimiter, maxKeys, _ := getListObjectsV1Args(r.URL.Query())
    prefix, marker, delimiter, maxKeys, _, s3Error := getListObjectsV1Args(r.URL.Query())
    if s3Error != ErrNone {
        writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
        return
    }

    // Validate the maxKeys lower bound. When maxKeys > 1000, S3 returns 1000 but
    // does not throw an error.
    if maxKeys < 0 {
        writeErrorResponse(w, ErrInvalidMaxKeys, r.URL)
        writeErrorResponse(w, ErrInvalidMaxKeys, r.URL, guessIsBrowserReq(r))
        return
    } // Validate all the query params before beginning to serve the request.
    if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone {
        writeErrorResponse(w, s3Error, r.URL)
        writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
        return
    }
    listObjects := objectAPI.ListObjects
@@ -169,20 +183,30 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
    // marshaled into S3 compatible XML header.
    listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
    if err != nil {
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

    for i := range listObjectsInfo.Objects {
        if listObjectsInfo.Objects[i].IsEncrypted() {
            var actualSize int64
            if listObjectsInfo.Objects[i].IsCompressed() {
                // Read the decompressed size from the meta.json.
                actualSize = listObjectsInfo.Objects[i].GetActualSize()
                if actualSize < 0 {
                    writeErrorResponse(w, ErrInvalidDecompressedSize, r.URL, guessIsBrowserReq(r))
                    return
                }
                // Set the info.Size to the actualSize.
                listObjectsInfo.Objects[i].Size = actualSize
            } else if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
                listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false)
                listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].DecryptedSize()
                if err != nil {
                    writeErrorResponse(w, toAPIErrorCode(err), r.URL)
                    writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
                    return
                }
            }
        }

    response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, maxKeys, listObjectsInfo)

    // Write success response.

@@ -28,20 +28,18 @@ import (
    "path"
    "path/filepath"
    "strings"
    "sync"

    etcd "github.com/coreos/etcd/client"

    "github.com/gorilla/mux"

    "github.com/minio/minio-go/pkg/set"
    "github.com/minio/minio/cmd/crypto"
    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/dns"
    "github.com/minio/minio/pkg/event"
    "github.com/minio/minio/pkg/handlers"
    "github.com/minio/minio/pkg/hash"
    "github.com/minio/minio/pkg/policy"
    "github.com/minio/minio/pkg/sync/errgroup"

    "github.com/minio/minio-go/pkg/set"
)

// Check if there are buckets on server without corresponding entry in etcd backend and
@@ -64,7 +62,7 @@ func initFederatorBackend(objLayer ObjectLayer) {
    g.Go(func() error {
        r, gerr := globalDNSConfig.Get(b[index].Name)
        if gerr != nil {
            if etcd.IsKeyNotFound(gerr) || gerr == dns.ErrNoEntriesFound {
            if gerr == dns.ErrNoEntriesFound {
                return globalDNSConfig.Put(b[index].Name)
            }
            return gerr
@@ -89,34 +87,30 @@ func initFederatorBackend(objLayer ObjectLayer) {
// -------------------------
// This operation returns bucket location.
func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, "GetBucketLocation")
    ctx := newContext(r, w, "GetBucketLocation")

    defer logger.AuditLog(w, r, "GetBucketLocation", mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(w, ErrServerNotInitialized, r.URL)
        writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
        return
    }

    if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketLocationAction, bucket, ""); s3Error != ErrNone {
        writeErrorResponse(w, s3Error, r.URL)
        writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
        return
    }

    bucketLock := globalNSMutex.NewNSLock(bucket, "")
    if err := bucketLock.GetRLock(globalObjectTimeout); err != nil {
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        return
    }
    defer bucketLock.RUnlock()
    getBucketInfo := objectAPI.GetBucketInfo
    if api.CacheAPI() != nil {
        getBucketInfo = api.CacheAPI().GetBucketInfo
    }
    if _, err := getBucketInfo(ctx, bucket); err != nil {
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

@@ -143,38 +137,44 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
// uploads in the response.
//
func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, "ListMultipartUploads")
    ctx := newContext(r, w, "ListMultipartUploads")

    defer logger.AuditLog(w, r, "ListMultipartUploads", mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(w, ErrServerNotInitialized, r.URL)
        writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
        return
    }

    if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketMultipartUploadsAction, bucket, ""); s3Error != ErrNone {
        writeErrorResponse(w, s3Error, r.URL)
        writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
        return
    }

    prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, _ := getBucketMultipartResources(r.URL.Query())
    prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, _, s3Error := getBucketMultipartResources(r.URL.Query())
    if s3Error != ErrNone {
        writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
        return
    }
    if maxUploads < 0 {
        writeErrorResponse(w, ErrInvalidMaxUploads, r.URL)
        writeErrorResponse(w, ErrInvalidMaxUploads, r.URL, guessIsBrowserReq(r))
        return
    }
    if keyMarker != "" {
        // Marker not common with prefix is not implemented.
        if !hasPrefix(keyMarker, prefix) {
            writeErrorResponse(w, ErrNotImplemented, r.URL)
            writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
            return
        }
    }

    listMultipartsInfo, err := objectAPI.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
    if err != nil {
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }
    // generate response
@@ -190,11 +190,13 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
// This implementation of the GET operation returns a list of all buckets
// owned by the authenticated sender of the request.
func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, "ListBuckets")
    ctx := newContext(r, w, "ListBuckets")

    defer logger.AuditLog(w, r, "ListBuckets", mustGetClaimsFromToken(r))

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(w, ErrServerNotInitialized, r.URL)
        writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
        return
    }
    listBuckets := objectAPI.ListBuckets
@@ -204,15 +206,15 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
    }

    if s3Error := checkRequestAuthType(ctx, r, policy.ListAllMyBucketsAction, "", ""); s3Error != ErrNone {
        writeErrorResponse(w, s3Error, r.URL)
        writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
        return
    }
    // If etcd/DNS federation is configured, list buckets from etcd.
    var bucketsInfo []BucketInfo
    if globalDNSConfig != nil {
        dnsBuckets, err := globalDNSConfig.List()
        if err != nil && !etcd.IsKeyNotFound(err) && err != dns.ErrNoEntriesFound {
            writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        if err != nil && err != dns.ErrNoEntriesFound {
            writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
            return
        }
        bucketSet := set.NewStringSet()
@@ -231,7 +233,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
        var err error
        bucketsInfo, err = listBuckets(ctx)
        if err != nil {
            writeErrorResponse(w, toAPIErrorCode(err), r.URL)
            writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
            return
        }
    }
@@ -246,14 +248,16 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R

// DeleteMultipleObjectsHandler - deletes multiple objects.
func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, "DeleteMultipleObjects")
    ctx := newContext(r, w, "DeleteMultipleObjects")

    defer logger.AuditLog(w, r, "DeleteMultipleObjects", mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(w, ErrServerNotInitialized, r.URL)
        writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
        return
    }

@@ -262,7 +266,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
    // In the event access is denied, a 200 response should still be returned
    // http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
    if s3Error != ErrAccessDenied {
        writeErrorResponse(w, s3Error, r.URL)
        writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
        return
    }
    }
@@ -270,14 +274,14 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
    // Content-Length is required and should be non-zero
    // http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
    if r.ContentLength <= 0 {
        writeErrorResponse(w, ErrMissingContentLength, r.URL)
        writeErrorResponse(w, ErrMissingContentLength, r.URL, guessIsBrowserReq(r))
        return
    }

    // Content-Md5 is required and should be set
    // http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
    if _, ok := r.Header["Content-Md5"]; !ok {
        writeErrorResponse(w, ErrMissingContentMD5, r.URL)
        writeErrorResponse(w, ErrMissingContentMD5, r.URL, guessIsBrowserReq(r))
        return
    }

@@ -293,7 +297,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
    // Read incoming body XML bytes.
    if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
        logger.LogIf(ctx, err)
        writeErrorResponse(w, ErrInternalError, r.URL)
        writeErrorResponse(w, ErrInternalError, r.URL, guessIsBrowserReq(r))
        return
    }

@@ -301,7 +305,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
    deleteObjects := &DeleteObjectsRequest{}
    if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
        logger.LogIf(ctx, err)
        writeErrorResponse(w, ErrMalformedXML, r.URL)
        writeErrorResponse(w, ErrMalformedXML, r.URL, guessIsBrowserReq(r))
        return
    }

@@ -309,38 +313,28 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
    if globalWORMEnabled {
        // Not required to check whether given objects exist or not, because
        // DeleteMultipleObject is always successful irrespective of object existence.
        writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
        writeErrorResponse(w, ErrMethodNotAllowed, r.URL, guessIsBrowserReq(r))
        return
    }

    var wg = &sync.WaitGroup{} // Allocate a new wait group.
    var dErrs = make([]error, len(deleteObjects.Objects))

    // Delete all requested objects in parallel.
    for index, object := range deleteObjects.Objects {
        wg.Add(1)
        go func(i int, obj ObjectIdentifier) {
            defer wg.Done()
            // If the request is denied access, each item
            // should be marked as 'AccessDenied'
            if s3Error == ErrAccessDenied {
                dErrs[i] = PrefixAccessDenied{
                    Bucket: bucket,
                    Object: obj.ObjectName,
                }
                return
            }
            deleteObject := objectAPI.DeleteObject
            if api.CacheAPI() != nil {
                deleteObject = api.CacheAPI().DeleteObject
            }
            dErr := deleteObject(ctx, bucket, obj.ObjectName)
            if dErr != nil {
                dErrs[i] = dErr
            }
        }(index, object)
    deleteObject := objectAPI.DeleteObject
    if api.CacheAPI() != nil {
        deleteObject = api.CacheAPI().DeleteObject
    }

    var dErrs = make([]error, len(deleteObjects.Objects))
    for index, object := range deleteObjects.Objects {
        // If the request is denied access, each item
        // should be marked as 'AccessDenied'
        if s3Error == ErrAccessDenied {
            dErrs[index] = PrefixAccessDenied{
                Bucket: bucket,
                Object: object.ObjectName,
            }
            continue
        }
        dErrs[index] = deleteObject(ctx, bucket, object.ObjectName)
    }
    wg.Wait()

    // Collect deleted objects and errors if any.
    var deletedObjects []ObjectIdentifier
@@ -360,8 +354,8 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
        }
        // Error during delete should be collected separately.
        deleteErrors = append(deleteErrors, DeleteError{
            Code:    errorCodeResponse[toAPIErrorCode(err)].Code,
            Message: errorCodeResponse[toAPIErrorCode(err)].Description,
            Code:    errorCodeResponse[toAPIErrorCode(ctx, err)].Code,
            Message: errorCodeResponse[toAPIErrorCode(ctx, err)].Description,
            Key:     object.ObjectName,
        })
    }
@@ -375,7 +369,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

    // Get host and port from Request.RemoteAddr failing which
    // fill them with empty strings.
    host, port, err := net.SplitHostPort(r.RemoteAddr)
    host, port, err := net.SplitHostPort(handlers.GetSourceIP(r))
    if err != nil {
        host, port = "", ""
    }
@@ -388,10 +382,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
        Object: ObjectInfo{
            Name: dobj.ObjectName,
        },
        ReqParams: extractReqParams(r),
        UserAgent: r.UserAgent(),
        Host:      host,
        Port:      port,
        ReqParams:    extractReqParams(r),
        RespElements: extractRespElements(w),
        UserAgent:    r.UserAgent(),
        Host:         host,
        Port:         port,
    })
    }
}
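Note the structural change in the delete hunk above: per-object goroutines and a WaitGroup give way to a plain serial loop that fills one error slot per object, which keeps ordering deterministic and drops the shared-slice synchronization. A hypothetical condensation of the new pattern (all names invented for illustration):

// deleteAll condenses the serial loop: one error slot per object,
// pre-filled as denied when the whole request was denied.
func deleteAll(ctx context.Context, del func(context.Context, string, string) error,
    bucket string, objects []string, accessDenied bool) []error {
    errs := make([]error, len(objects))
    for i, name := range objects {
        if accessDenied {
            errs[i] = errors.New("access denied: " + name)
            continue
        }
        errs[i] = del(ctx, bucket, name)
    }
    return errs
}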
@@ -400,11 +395,13 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// ----------
// This implementation of the PUT operation creates a new bucket for authenticated request
func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, "PutBucket")
    ctx := newContext(r, w, "PutBucket")

    defer logger.AuditLog(w, r, "PutBucket", mustGetClaimsFromToken(r))

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(w, ErrServerNotInitialized, r.URL)
        writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
        return
    }

@@ -412,35 +409,35 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
    bucket := vars["bucket"]

    if s3Error := checkRequestAuthType(ctx, r, policy.CreateBucketAction, bucket, ""); s3Error != ErrNone {
        writeErrorResponse(w, s3Error, r.URL)
        writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
        return
    }

    // Parse incoming location constraint.
    location, s3Error := parseLocationConstraint(r)
    if s3Error != ErrNone {
        writeErrorResponse(w, s3Error, r.URL)
        writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
        return
    }

    // Validate if location sent by the client is valid, reject
    // requests which do not follow valid region requirements.
    if !isValidLocation(location) {
        writeErrorResponse(w, ErrInvalidRegion, r.URL)
        writeErrorResponse(w, ErrInvalidRegion, r.URL, guessIsBrowserReq(r))
        return
    }

    if globalDNSConfig != nil {
        if _, err := globalDNSConfig.Get(bucket); err != nil {
            if etcd.IsKeyNotFound(err) || err == dns.ErrNoEntriesFound {
            if err == dns.ErrNoEntriesFound {
                // Proceed to creating a bucket.
                if err = objectAPI.MakeBucketWithLocation(ctx, bucket, location); err != nil {
                    writeErrorResponse(w, toAPIErrorCode(err), r.URL)
                    writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
                    return
                }
                if err = globalDNSConfig.Put(bucket); err != nil {
                    objectAPI.DeleteBucket(ctx, bucket)
                    writeErrorResponse(w, toAPIErrorCode(err), r.URL)
                    writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
                    return
                }

@@ -450,18 +447,18 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
            writeSuccessResponseHeadersOnly(w)
            return
        }
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
        return

    }
    writeErrorResponse(w, ErrBucketAlreadyOwnedByYou, r.URL)
    writeErrorResponse(w, ErrBucketAlreadyOwnedByYou, r.URL, guessIsBrowserReq(r))
    return
    }

    // Proceed to creating a bucket.
    err := objectAPI.MakeBucketWithLocation(ctx, bucket, location)
    if err != nil {
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

@@ -476,30 +473,39 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
// This implementation of the POST operation handles object creation with a specified
// signature policy in multipart/form-data
func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, "PostPolicyBucket")
    ctx := newContext(r, w, "PostPolicyBucket")

    defer logger.AuditLog(w, r, "PostPolicyBucket", mustGetClaimsFromToken(r))

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(w, ErrServerNotInitialized, r.URL)
        writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
        return
    }
    if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
        writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
        return
    }
    if !api.EncryptionEnabled() && hasServerSideEncryptionHeader(r.Header) {
        writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
        return
    }

    bucket := mux.Vars(r)["bucket"]

    // Require Content-Length to be set in the request
    size := r.ContentLength
    if size < 0 {
        writeErrorResponse(w, ErrMissingContentLength, r.URL)
        writeErrorResponse(w, ErrMissingContentLength, r.URL, guessIsBrowserReq(r))
        return
    }
    resource, err := getResource(r.URL.Path, r.Host, globalDomainName)
    if err != nil {
        writeErrorResponse(w, ErrInvalidRequest, r.URL)
        writeErrorResponse(w, ErrInvalidRequest, r.URL, guessIsBrowserReq(r))
        return
    }
    // Make sure that the URL does not contain object name.
    if bucket != filepath.Clean(resource[1:]) {
        writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
        writeErrorResponse(w, ErrMethodNotAllowed, r.URL, guessIsBrowserReq(r))
        return
    }

@@ -508,7 +514,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
    reader, err := r.MultipartReader()
    if err != nil {
        logger.LogIf(ctx, err)
        writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
        writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL, guessIsBrowserReq(r))
        return
    }

@@ -516,7 +522,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
    form, err := reader.ReadForm(maxFormMemory)
    if err != nil {
        logger.LogIf(ctx, err)
        writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
        writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL, guessIsBrowserReq(r))
        return
    }

@@ -527,13 +533,13 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
    fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(ctx, form)
    if err != nil {
        logger.LogIf(ctx, err)
        writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
        writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL, guessIsBrowserReq(r))
        return
    }

    // Check if file is provided, error out otherwise.
    if fileBody == nil {
        writeErrorResponse(w, ErrPOSTFileRequired, r.URL)
        writeErrorResponse(w, ErrPOSTFileRequired, r.URL, guessIsBrowserReq(r))
        return
    }

@@ -555,7 +561,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
    if successRedirect != "" {
        redirectURL, err = url.Parse(successRedirect)
        if err != nil {
            writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
            writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL, guessIsBrowserReq(r))
            return
        }
    }
@@ -563,83 +569,104 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
    // Verify policy signature.
    apiErr := doesPolicySignatureMatch(formValues)
    if apiErr != ErrNone {
        writeErrorResponse(w, apiErr, r.URL)
        writeErrorResponse(w, apiErr, r.URL, guessIsBrowserReq(r))
        return
    }

    policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
    if err != nil {
        writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
        writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL, guessIsBrowserReq(r))
        return
    }

    postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
    if err != nil {
        writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
        return
    }

    // Make sure formValues adhere to policy restrictions.
    if apiErr = checkPostPolicy(formValues, postPolicyForm); apiErr != ErrNone {
        writeErrorResponse(w, apiErr, r.URL)
        return
    }

    // Ensure that the object size is within expected range, also the file size
    // should not exceed the maximum single Put size (5 GiB)
    lengthRange := postPolicyForm.Conditions.ContentLengthRange
    if lengthRange.Valid {
        if fileSize < lengthRange.Min {
            writeErrorResponse(w, toAPIErrorCode(errDataTooSmall), r.URL)
    // Handle policy if it is set.
    if len(policyBytes) > 0 {
        postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
        if err != nil {
            writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL, guessIsBrowserReq(r))
            return
        }

        if fileSize > lengthRange.Max || isMaxObjectSize(fileSize) {
            writeErrorResponse(w, toAPIErrorCode(errDataTooLarge), r.URL)
        // Make sure formValues adhere to policy restrictions.
        if apiErr = checkPostPolicy(formValues, postPolicyForm); apiErr != ErrNone {
            writeErrorResponse(w, apiErr, r.URL, guessIsBrowserReq(r))
            return
        }

        // Ensure that the object size is within expected range, also the file size
        // should not exceed the maximum single Put size (5 GiB)
        lengthRange := postPolicyForm.Conditions.ContentLengthRange
        if lengthRange.Valid {
            if fileSize < lengthRange.Min {
                writeErrorResponse(w, toAPIErrorCode(ctx, errDataTooSmall), r.URL, guessIsBrowserReq(r))
                return
            }

            if fileSize > lengthRange.Max || isMaxObjectSize(fileSize) {
                writeErrorResponse(w, toAPIErrorCode(ctx, errDataTooLarge), r.URL, guessIsBrowserReq(r))
                return
            }
        }
    }

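The new wrapper above enforces the POST policy only when one was actually supplied, and the content-length-range check bounds the upload between the policy's Min/Max and the single-PUT ceiling mentioned in the comment. A standalone sketch of that bound (checkLengthRange is hypothetical; the 5 GiB constant follows the comment in the diff):

func checkLengthRange(fileSize, min, max int64) error {
    const maxSinglePut = 5 << 30 // 5 GiB, per the comment above
    if fileSize < min {
        return errors.New("data too small for content-length-range")
    }
    if fileSize > max || fileSize > maxSinglePut {
        return errors.New("data too large for content-length-range")
    }
    return nil
}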
    // Extract metadata to be saved from received Form.
    metadata, err := extractMetadataFromHeader(ctx, formValues)
    metadata := make(map[string]string)
    err = extractMetadataFromMap(ctx, formValues, metadata)
    if err != nil {
        writeErrorResponse(w, ErrInternalError, r.URL)
        writeErrorResponse(w, ErrInternalError, r.URL, guessIsBrowserReq(r))
        return
    }

    hashReader, err := hash.NewReader(fileBody, fileSize, "", "")
    hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize)
    if err != nil {
        logger.LogIf(ctx, err)
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }
    rawReader := hashReader
    pReader := NewPutObjReader(rawReader, nil, nil)
    var objectEncryptionKey []byte

    // This request header needs to be set prior to setting ObjectOptions
    if globalAutoEncryption && !crypto.SSEC.IsRequested(r.Header) {
        r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
    }
    // get gateway encryption options
    var opts ObjectOptions
    opts, err = putEncryptionOpts(ctx, r, bucket, object, nil)
    if err != nil {
        writeErrorResponseHeadersOnly(w, toAPIErrorCode(ctx, err))
        return
    }
    if objectAPI.IsEncryptionSupported() {
        if hasSSECustomerHeader(formValues) && !hasSuffix(object, slashSeparator) { // handle SSE-C requests
        if hasServerSideEncryptionHeader(formValues) && !hasSuffix(object, slashSeparator) { // handle SSE-C and SSE-S3 requests
            var reader io.Reader
            var key []byte
            key, err = ParseSSECustomerHeader(formValues)
            if err != nil {
                writeErrorResponse(w, toAPIErrorCode(err), r.URL)
                return
            if crypto.SSEC.IsRequested(formValues) {
                key, err = ParseSSECustomerHeader(formValues)
                if err != nil {
                    writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
                    return
                }
            }
            reader, err = newEncryptReader(hashReader, key, metadata)
            reader, objectEncryptionKey, err = newEncryptReader(hashReader, key, bucket, object, metadata, crypto.S3.IsRequested(formValues))
            if err != nil {
                writeErrorResponse(w, toAPIErrorCode(err), r.URL)
                writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
                return
            }
            info := ObjectInfo{Size: fileSize}
            hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "") // do not try to verify encrypted content
            hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize) // do not try to verify encrypted content
            if err != nil {
                writeErrorResponse(w, toAPIErrorCode(err), r.URL)
                writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
                return
            }
            pReader = NewPutObjReader(rawReader, hashReader, objectEncryptionKey)
        }
    }

    objInfo, err := objectAPI.PutObject(ctx, bucket, object, hashReader, metadata)
    objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, metadata, opts)
    if err != nil {
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

@@ -648,20 +675,21 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
    w.Header().Set("Location", location)

    // Get host and port from Request.RemoteAddr.
    host, port, err := net.SplitHostPort(r.RemoteAddr)
    host, port, err := net.SplitHostPort(handlers.GetSourceIP(r))
    if err != nil {
        host, port = "", ""
    }

    // Notify object created event.
    defer sendEvent(eventArgs{
        EventName:  event.ObjectCreatedPost,
        BucketName: objInfo.Bucket,
        Object:     objInfo,
        ReqParams:  extractReqParams(r),
        UserAgent:  r.UserAgent(),
        Host:       host,
        Port:       port,
        EventName:    event.ObjectCreatedPost,
        BucketName:   objInfo.Bucket,
        Object:       objInfo,
        ReqParams:    extractReqParams(r),
        RespElements: extractRespElements(w),
        UserAgent:    r.UserAgent(),
        Host:         host,
        Port:         port,
    })

    if successRedirect != "" {
@@ -695,7 +723,9 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// have permission to access it. Otherwise, the operation might
// return responses such as 404 Not Found and 403 Forbidden.
func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, "HeadBucket")
    ctx := newContext(r, w, "HeadBucket")

    defer logger.AuditLog(w, r, "HeadBucket", mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]
@@ -716,7 +746,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
        getBucketInfo = api.CacheAPI().GetBucketInfo
    }
    if _, err := getBucketInfo(ctx, bucket); err != nil {
        writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
        writeErrorResponseHeadersOnly(w, toAPIErrorCode(ctx, err))
        return
    }

@@ -725,19 +755,21 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re

// DeleteBucketHandler - Delete bucket
func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, "DeleteBucket")
    ctx := newContext(r, w, "DeleteBucket")

    defer logger.AuditLog(w, r, "DeleteBucket", mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(w, ErrServerNotInitialized, r.URL)
        writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
        return
    }

    if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketAction, bucket, ""); s3Error != ErrNone {
        writeErrorResponse(w, s3Error, r.URL)
        writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
        return
    }

@@ -747,22 +779,19 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
    }
    // Attempt to delete bucket.
    if err := deleteBucket(ctx, bucket); err != nil {
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

    globalNotificationSys.RemoveNotification(bucket)
    globalPolicySys.Remove(bucket)
    for nerr := range globalNotificationSys.DeleteBucket(bucket) {
        logger.GetReqInfo(ctx).AppendTags("remotePeer", nerr.Host.Name)
        logger.LogIf(ctx, nerr.Err)
    }
    globalNotificationSys.DeleteBucket(ctx, bucket)

    if globalDNSConfig != nil {
        if err := globalDNSConfig.Delete(bucket); err != nil {
            // Deleting DNS entry failed, attempt to create the bucket again.
            objectAPI.MakeBucketWithLocation(ctx, bucket, "")
            writeErrorResponse(w, toAPIErrorCode(err), r.URL)
            writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
            return
        }
    }

@@ -84,7 +84,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
    // initialize httptest Recorder, this records any mutations to response writer inside the handler.
    rec := httptest.NewRecorder()
    // construct HTTP request for Get bucket location.
    req, err := newTestSignedRequestV4("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey)
    req, err := newTestSignedRequestV4("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
    if err != nil {
        t.Fatalf("Test %d: %s: Failed to create HTTP request for GetBucketLocationHandler: <ERROR> %v", i+1, instanceType, err)
    }
@@ -116,7 +116,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
    // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
    recV2 := httptest.NewRecorder()
    // construct HTTP request for PUT bucket policy endpoint.
    reqV2, err := newTestSignedRequestV2("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey)
    reqV2, err := newTestSignedRequestV2("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)

    if err != nil {
        t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@@ -220,7 +220,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
    // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
    rec := httptest.NewRecorder()
    // construct HTTP request for HEAD bucket.
    req, err := newTestSignedRequestV4("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey)
    req, err := newTestSignedRequestV4("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
    if err != nil {
        t.Fatalf("Test %d: %s: Failed to create HTTP request for HeadBucketHandler: <ERROR> %v", i+1, instanceType, err)
    }
@@ -235,7 +235,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
    // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
    recV2 := httptest.NewRecorder()
    // construct HTTP request for PUT bucket policy endpoint.
    reqV2, err := newTestSignedRequestV2("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey)
    reqV2, err := newTestSignedRequestV2("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)

    if err != nil {
        t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@@ -437,7 +437,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s

    // construct HTTP request for List multipart uploads endpoint.
    u := getListMultipartUploadsURLWithParams("", testCase.bucket, testCase.prefix, testCase.keyMarker, testCase.uploadIDMarker, testCase.delimiter, testCase.maxUploads)
    req, gerr := newTestSignedRequestV4("GET", u, 0, nil, testCase.accessKey, testCase.secretKey)
    req, gerr := newTestSignedRequestV4("GET", u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
    if gerr != nil {
        t.Fatalf("Test %d: %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", i+1, instanceType, gerr)
    }
@@ -454,7 +454,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
    // construct HTTP request for PUT bucket policy endpoint.

    // verify response for V2 signed HTTP request.
    reqV2, err := newTestSignedRequestV2("GET", u, 0, nil, testCase.accessKey, testCase.secretKey)
    reqV2, err := newTestSignedRequestV2("GET", u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
    if err != nil {
        t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
    }
@@ -471,7 +471,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s

    // construct HTTP request for List multipart uploads endpoint.
    u := getListMultipartUploadsURLWithParams("", bucketName, "", "", "", "", "")
    req, err := newTestSignedRequestV4("GET", u, 0, nil, "", "") // Generate an anonymous request.
    req, err := newTestSignedRequestV4("GET", u, 0, nil, "", "", nil) // Generate an anonymous request.
    if err != nil {
        t.Fatalf("Test %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", instanceType, err)
    }
@@ -551,7 +551,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
    for i, testCase := range testCases {
        // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
        rec := httptest.NewRecorder()
        req, lerr := newTestSignedRequestV4("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey)
        req, lerr := newTestSignedRequestV4("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
        if lerr != nil {
            t.Fatalf("Test %d: %s: Failed to create HTTP request for ListBucketsHandler: <ERROR> %v", i+1, instanceType, lerr)
        }
@@ -568,7 +568,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
    // construct HTTP request for PUT bucket policy endpoint.

    // verify response for V2 signed HTTP request.
    reqV2, err := newTestSignedRequestV2("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey)
    reqV2, err := newTestSignedRequestV2("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)

    if err != nil {
        t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@@ -625,7 +625,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
    for i := 0; i < 10; i++ {
        objectName := "test-object-" + strconv.Itoa(i)
        // uploading the object.
        _, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil)
        _, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil, ObjectOptions{})
        // if object upload fails stop the test.
        if err != nil {
            t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)
@@ -745,7 +745,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
    // Generate a signed or anonymous request based on the testCase
    if testCase.accessKey != "" {
        req, err = newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", bucketName),
            int64(len(testCase.objects)), bytes.NewReader(testCase.objects), testCase.accessKey, testCase.secretKey)
            int64(len(testCase.objects)), bytes.NewReader(testCase.objects), testCase.accessKey, testCase.secretKey, nil)
    } else {
        req, err = newTestRequest("POST", getDeleteMultipleObjectsURL("", bucketName),
            int64(len(testCase.objects)), bytes.NewReader(testCase.objects))
@@ -785,7 +785,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
    nilBucket := "dummy-bucket"
    nilObject := ""

    nilReq, err := newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", nilBucket), 0, nil, "", "")
    nilReq, err := newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", nilBucket), 0, nil, "", "", nil)
    if err != nil {
        t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
    }

@@ -42,30 +42,32 @@ var errNoSuchNotifications = errors.New("The specified bucket does not have buck
// as per http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html.
// It returns an empty configuration if it's not set.
func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, "GetBucketNotification")
    ctx := newContext(r, w, "GetBucketNotification")

    defer logger.AuditLog(w, r, "GetBucketNotification", mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucketName := vars["bucket"]

    objAPI := api.ObjectAPI()
    if objAPI == nil {
        writeErrorResponse(w, ErrServerNotInitialized, r.URL)
        writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
        return
    }

    if !objAPI.IsNotificationSupported() {
        writeErrorResponse(w, ErrNotImplemented, r.URL)
        writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
        return
    }

    if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
        writeErrorResponse(w, s3Error, r.URL)
        writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
        return
    }

    _, err := objAPI.GetBucketInfo(ctx, bucketName)
    if err != nil {
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

@@ -74,17 +76,21 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
|
||||
if err != nil {
|
||||
// Ignore errNoSuchNotifications to comply with AWS S3.
|
||||
if err != errNoSuchNotifications {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
nConfig = &event.Config{}
|
||||
}
|
||||
|
||||
// If xml namespace is empty, set a default value before returning.
|
||||
if nConfig.XMLNS == "" {
|
||||
nConfig.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
|
||||
}
|
||||
|
||||
notificationBytes, err := xml.Marshal(nConfig)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -94,16 +100,18 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
|
||||
// PutBucketNotificationHandler - This HTTP handler stores given notification configuration as per
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html.
|
||||
func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, "PutBucketNotification")
|
||||
ctx := newContext(r, w, "PutBucketNotification")
|
||||
|
||||
defer logger.AuditLog(w, r, "PutBucketNotification", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if !objectAPI.IsNotificationSupported() {
|
||||
writeErrorResponse(w, ErrNotImplemented, r.URL)
|
||||
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -111,19 +119,19 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
|
||||
bucketName := vars["bucket"]
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
_, err := objectAPI.GetBucketInfo(ctx, bucketName)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// PutBucketNotification always needs a Content-Length.
|
||||
if r.ContentLength <= 0 {
|
||||
writeErrorResponse(w, ErrMissingContentLength, r.URL)
|
||||
writeErrorResponse(w, ErrMissingContentLength, r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -132,24 +140,21 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
|
||||
if err != nil {
|
||||
apiErr := ErrMalformedXML
|
||||
if event.IsEventError(err) {
|
||||
apiErr = toAPIErrorCode(err)
|
||||
apiErr = toAPIErrorCode(ctx, err)
|
||||
}
|
||||
|
||||
writeErrorResponse(w, apiErr, r.URL)
|
||||
writeErrorResponse(w, apiErr, r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if err = saveNotificationConfig(objectAPI, bucketName, config); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
if err = saveNotificationConfig(ctx, objectAPI, bucketName, config); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
rulesMap := config.ToRulesMap()
|
||||
globalNotificationSys.AddRulesMap(bucketName, rulesMap)
|
||||
for nerr := range globalNotificationSys.PutBucketNotification(bucketName, rulesMap) {
|
||||
logger.GetReqInfo(ctx).AppendTags("remotePeer", nerr.Host.Name)
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
}
|
||||
globalNotificationSys.PutBucketNotification(ctx, bucketName, rulesMap)
|
||||
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
}

@@ -157,24 +162,30 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
// ListenBucketNotificationHandler - This HTTP handler sends events to the connected HTTP client.
// Client should send prefix/suffix object name to match and events to watch as query parameters.
func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := newContext(r, "ListenBucketNotification")
+	ctx := newContext(r, w, "ListenBucketNotification")
+
+	defer logger.AuditLog(w, r, "ListenBucketNotification", mustGetClaimsFromToken(r))

	// Validate if bucket exists.
	objAPI := api.ObjectAPI()
	if objAPI == nil {
-		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
+		writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
		return
	}
	if !objAPI.IsNotificationSupported() {
-		writeErrorResponse(w, ErrNotImplemented, r.URL)
+		writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
		return
	}
+
+	if !objAPI.IsListenBucketSupported() {
+		writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
+		return
+	}
	vars := mux.Vars(r)
	bucketName := vars["bucket"]

	if s3Error := checkRequestAuthType(ctx, r, policy.ListenBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
-		writeErrorResponse(w, s3Error, r.URL)
+		writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
		return
	}

@@ -182,11 +193,11 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit

	var prefix string
	if len(values["prefix"]) > 1 {
-		writeErrorResponse(w, ErrFilterNamePrefix, r.URL)
+		writeErrorResponse(w, ErrFilterNamePrefix, r.URL, guessIsBrowserReq(r))
	}
	if len(values["prefix"]) == 1 {
		if err := event.ValidateFilterRuleValue(values["prefix"][0]); err != nil {
-			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+			writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

@@ -195,11 +206,11 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit

	var suffix string
	if len(values["suffix"]) > 1 {
-		writeErrorResponse(w, ErrFilterNameSuffix, r.URL)
+		writeErrorResponse(w, ErrFilterNameSuffix, r.URL, guessIsBrowserReq(r))
	}
	if len(values["suffix"]) == 1 {
		if err := event.ValidateFilterRuleValue(values["suffix"][0]); err != nil {
-			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+			writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

@@ -212,7 +223,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
	for _, s := range values["events"] {
		eventName, err := event.ParseName(s)
		if err != nil {
-			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+			writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

@@ -220,28 +231,27 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
	}

	if _, err := objAPI.GetBucketInfo(ctx, bucketName); err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	host, err := xnet.ParseHost(r.RemoteAddr)
	if err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	target, err := target.NewHTTPClientTarget(*host, w)
	if err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	rulesMap := event.NewRulesMap(eventNames, pattern, target.ID())

-	if err := globalNotificationSys.AddRemoteTarget(bucketName, target, rulesMap); err != nil {
+	if err = globalNotificationSys.AddRemoteTarget(bucketName, target, rulesMap); err != nil {
		logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
		logger.LogIf(ctx, err)
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	defer globalNotificationSys.RemoveRemoteTarget(bucketName, target.ID())
@@ -249,29 +259,23 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit

	thisAddr, err := xnet.ParseHost(GetLocalPeer(globalEndpoints))
	if err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if err = SaveListener(objAPI, bucketName, eventNames, pattern, target.ID(), *thisAddr); err != nil {
		logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
		logger.LogIf(ctx, err)
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

-	errCh := globalNotificationSys.ListenBucketNotification(bucketName, eventNames, pattern, target.ID(), *thisAddr)
-	for nerr := range errCh {
-		logger.GetReqInfo(ctx).AppendTags("remotePeer", nerr.Host.Name)
-		logger.LogIf(ctx, nerr.Err)
-	}
+	globalNotificationSys.ListenBucketNotification(ctx, bucketName, eventNames, pattern, target.ID(), *thisAddr)

	<-target.DoneCh

	if err = RemoveListener(objAPI, bucketName, target.ID(), *thisAddr); err != nil {
		logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
		logger.LogIf(ctx, err)
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
}
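
For orientation, a rough client-side counterpart to this handler: listening is a long-lived GET whose prefix, suffix and events arrive as query parameters, exactly as parsed above. The endpoint, bucket name and the unsigned request below are illustrative assumptions; real requests must be signed.

package main

import (
	"bufio"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// prefix/suffix/events mirror the query parameters the handler parses.
	q := url.Values{}
	q.Set("prefix", "photos/")
	q.Set("suffix", ".jpg")
	q.Add("events", "s3:ObjectCreated:*")

	// Unsigned GET against a local server; real deployments need a
	// signed (V2/V4) request. Bucket name and address are illustrative.
	resp, err := http.Get("http://localhost:9000/mybucket?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The handler streams one event document per line until either side
	// closes the connection.
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		fmt.Println(sc.Text())
	}
}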

@@ -38,11 +38,13 @@ const (
// PutBucketPolicyHandler - This HTTP handler stores given bucket policy configuration as per
// https://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := newContext(r, "PutBucketPolicy")
+	ctx := newContext(r, w, "PutBucketPolicy")
+
+	defer logger.AuditLog(w, r, "PutBucketPolicy", mustGetClaimsFromToken(r))

	objAPI := api.ObjectAPI()
	if objAPI == nil {
-		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
+		writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
		return
	}

@@ -50,51 +52,48 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
	bucket := vars["bucket"]

	if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
-		writeErrorResponse(w, s3Error, r.URL)
+		writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket exists.
	if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Error out if Content-Length is missing.
	// PutBucketPolicy always needs Content-Length.
	if r.ContentLength <= 0 {
-		writeErrorResponse(w, ErrMissingContentLength, r.URL)
+		writeErrorResponse(w, ErrMissingContentLength, r.URL, guessIsBrowserReq(r))
		return
	}

	// Error out if Content-Length is beyond allowed size.
	if r.ContentLength > maxBucketPolicySize {
-		writeErrorResponse(w, ErrEntityTooLarge, r.URL)
+		writeErrorResponse(w, ErrEntityTooLarge, r.URL, guessIsBrowserReq(r))
		return
	}

	bucketPolicy, err := policy.ParseConfig(io.LimitReader(r.Body, r.ContentLength), bucket)
	if err != nil {
-		writeErrorResponse(w, ErrMalformedPolicy, r.URL)
+		writeErrorResponse(w, ErrMalformedPolicy, r.URL, guessIsBrowserReq(r))
		return
	}

	// Version in policy must not be empty
	if bucketPolicy.Version == "" {
-		writeErrorResponse(w, ErrMalformedPolicy, r.URL)
+		writeErrorResponse(w, ErrMalformedPolicy, r.URL, guessIsBrowserReq(r))
		return
	}

	if err = objAPI.SetBucketPolicy(ctx, bucket, bucketPolicy); err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	globalPolicySys.Set(bucket, *bucketPolicy)
-	for nerr := range globalNotificationSys.SetBucketPolicy(bucket, bucketPolicy) {
-		logger.GetReqInfo(ctx).AppendTags("remotePeer", nerr.Host.Name)
-		logger.LogIf(ctx, nerr.Err)
-	}
+	globalNotificationSys.SetBucketPolicy(ctx, bucket, bucketPolicy)

	// Success.
	writeSuccessNoContent(w)
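
The handler above rejects a policy whose Version field is empty and any body over maxBucketPolicySize. A minimal policy document that passes those parse-time checks; the bucket name is illustrative.

package main

import "fmt"

func main() {
	// Passes the checks above: non-empty Version, well under
	// maxBucketPolicySize. Bucket name "mybucket" is illustrative.
	const bucketPolicy = `{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {"AWS": ["*"]},
      "Action": ["s3:GetObject"],
      "Resource": ["arn:aws:s3:::mybucket/*"]
    }
  ]
}`
	fmt.Println(bucketPolicy)
}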
@@ -102,11 +101,13 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht

// DeleteBucketPolicyHandler - This HTTP handler removes bucket policy configuration.
func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := newContext(r, "DeleteBucketPolicy")
+	ctx := newContext(r, w, "DeleteBucketPolicy")
+
+	defer logger.AuditLog(w, r, "DeleteBucketPolicy", mustGetClaimsFromToken(r))

	objAPI := api.ObjectAPI()
	if objAPI == nil {
-		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
+		writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
		return
	}

@@ -114,26 +115,23 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
	bucket := vars["bucket"]

	if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketPolicyAction, bucket, ""); s3Error != ErrNone {
-		writeErrorResponse(w, s3Error, r.URL)
+		writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket exists.
	if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if err := objAPI.DeleteBucketPolicy(ctx, bucket); err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	globalPolicySys.Remove(bucket)
-	for nerr := range globalNotificationSys.RemoveBucketPolicy(bucket) {
-		logger.GetReqInfo(ctx).AppendTags("remotePeer", nerr.Host.Name)
-		logger.LogIf(ctx, nerr.Err)
-	}
+	globalNotificationSys.RemoveBucketPolicy(ctx, bucket)

	// Success.
	writeSuccessNoContent(w)
@@ -141,11 +139,13 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r

// GetBucketPolicyHandler - This HTTP handler returns bucket policy configuration.
func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := newContext(r, "GetBucketPolicy")
+	ctx := newContext(r, w, "GetBucketPolicy")
+
+	defer logger.AuditLog(w, r, "GetBucketPolicy", mustGetClaimsFromToken(r))

	objAPI := api.ObjectAPI()
	if objAPI == nil {
-		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
+		writeErrorResponse(w, ErrServerNotInitialized, r.URL, guessIsBrowserReq(r))
		return
	}

@@ -153,27 +153,26 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
	bucket := vars["bucket"]

	if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
-		writeErrorResponse(w, s3Error, r.URL)
+		writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket exists.
	if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Read bucket access policy.
	bucketPolicy, err := objAPI.GetBucketPolicy(ctx, bucket)
	if err != nil {
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	policyData, err := json.Marshal(bucketPolicy)
	if err != nil {
		logger.LogIf(ctx, err)
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

@@ -254,7 +254,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
	recV4 := httptest.NewRecorder()
	// construct HTTP request for PUT bucket policy endpoint.
	reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testCase.bucketName),
-		int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey)
+		int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey, nil)
	if err != nil {
		t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
	}
@@ -268,7 +268,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
	recV2 := httptest.NewRecorder()
	// construct HTTP request for PUT bucket policy endpoint.
	reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testCase.bucketName),
-		int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey)
+		int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey, nil)
	if err != nil {
		t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
	}
@@ -304,7 +304,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
	nilBucket := "dummy-bucket"

	nilReq, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", nilBucket),
-		0, nil, "", "")
+		0, nil, "", "", nil)

	if err != nil {
		t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@@ -346,7 +346,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
	recV4 := httptest.NewRecorder()
	// construct HTTP request for PUT bucket policy endpoint.
	reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testPolicy.bucketName),
-		int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey)
+		int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
	if err != nil {
		t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
	}
@@ -360,7 +360,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
	recV2 := httptest.NewRecorder()
	// construct HTTP request for PUT bucket policy endpoint.
	reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testPolicy.bucketName),
-		int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey)
+		int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
	if err != nil {
		t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
	}
@@ -417,7 +417,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
	recV4 := httptest.NewRecorder()
	// construct HTTP request for PUT bucket policy endpoint.
	reqV4, err := newTestSignedRequestV4("GET", getGetPolicyURL("", testCase.bucketName),
-		0, nil, testCase.accessKey, testCase.secretKey)
+		0, nil, testCase.accessKey, testCase.secretKey, nil)

	if err != nil {
		t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
@@ -437,11 +437,13 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string

	if recV4.Code != testCase.expectedRespStatus {
		// Verify whether the bucket policy fetched is same as the one inserted.
-		expectedPolicy, err := policy.ParseConfig(strings.NewReader(expectedBucketPolicyStr), testCase.bucketName)
+		var expectedPolicy *policy.Policy
+		expectedPolicy, err = policy.ParseConfig(strings.NewReader(expectedBucketPolicyStr), testCase.bucketName)
		if err != nil {
			t.Fatalf("unexpected error. %v", err)
		}
-		gotPolicy, err := policy.ParseConfig(bytes.NewReader(bucketPolicyReadBuf), testCase.bucketName)
+		var gotPolicy *policy.Policy
+		gotPolicy, err = policy.ParseConfig(bytes.NewReader(bucketPolicyReadBuf), testCase.bucketName)
		if err != nil {
			t.Fatalf("unexpected error. %v", err)
		}
@@ -454,7 +456,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
	recV2 := httptest.NewRecorder()
	// construct HTTP request for PUT bucket policy endpoint.
	reqV2, err := newTestSignedRequestV2("GET", getGetPolicyURL("", testCase.bucketName),
-		0, nil, testCase.accessKey, testCase.secretKey)
+		0, nil, testCase.accessKey, testCase.secretKey, nil)
	if err != nil {
		t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
	}
@@ -509,7 +511,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
	nilBucket := "dummy-bucket"

	nilReq, err := newTestSignedRequestV4("GET", getGetPolicyURL("", nilBucket),
-		0, nil, "", "")
+		0, nil, "", "", nil)

	if err != nil {
		t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@@ -589,7 +591,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
	recV4 := httptest.NewRecorder()
	// construct HTTP request for PUT bucket policy endpoint.
	reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testPolicy.bucketName),
-		int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey)
+		int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
	if err != nil {
		t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
	}
@@ -639,7 +641,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
	recV4 := httptest.NewRecorder()
	// construct HTTP request for Delete bucket policy endpoint.
	reqV4, err := newTestSignedRequestV4("DELETE", getDeletePolicyURL("", testCase.bucketName),
-		0, nil, testCase.accessKey, testCase.secretKey)
+		0, nil, testCase.accessKey, testCase.secretKey, nil)
	if err != nil {
		t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
	}
@@ -661,7 +663,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
	recV2 := httptest.NewRecorder()
	// construct HTTP request for PUT bucket policy endpoint.
	reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testPolicy.bucketName),
-		int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey)
+		int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
	if err != nil {
		t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
	}
@@ -678,7 +680,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
	recV2 := httptest.NewRecorder()
	// construct HTTP request for Delete bucket policy endpoint.
	reqV2, err := newTestSignedRequestV2("DELETE", getDeletePolicyURL("", testCase.bucketName),
-		0, nil, testCase.accessKey, testCase.secretKey)
+		0, nil, testCase.accessKey, testCase.secretKey, nil)
	if err != nil {
		t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
	}
@@ -712,7 +714,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
	nilBucket := "dummy-bucket"

	nilReq, err := newTestSignedRequestV4("DELETE", getDeletePolicyURL("", nilBucket),
-		0, nil, "", "")
+		0, nil, "", "", nil)

	if err != nil {
		t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)

cmd/certs.go (56 changed lines)
@@ -68,24 +68,6 @@ func parsePublicCertFile(certFile string) (x509Certs []*x509.Certificate, err er
}

func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
-	// Get all CA file names.
-	var caFiles []string
-	fis, err := readDir(certsCAsDir)
-	if err != nil {
-		return nil, err
-	}
-	for _, fi := range fis {
-		// Skip all directories.
-		if hasSuffix(fi, slashSeparator) {
-			continue
-		}
-		// We are only interested in regular files here.
-		caFiles = append(caFiles, pathJoin(certsCAsDir, fi))
-	}
-	if len(caFiles) == 0 {
-		return nil, nil
-	}
-
	rootCAs, _ := x509.SystemCertPool()
	if rootCAs == nil {
		// In some systems (like Windows) system cert pool is
@@ -94,16 +76,26 @@ func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
		rootCAs = x509.NewCertPool()
	}

-	// Load custom root CAs for client requests
-	for _, caFile := range caFiles {
-		caCert, err := ioutil.ReadFile(caFile)
-		if err != nil {
-			return nil, err
-		}
-		rootCAs.AppendCertsFromPEM(caCert)
+	fis, err := readDir(certsCAsDir)
+	if err != nil {
+		if err == errFileNotFound {
+			err = nil // Return success if CA's directory is missing.
+		}
+		return rootCAs, err
	}

+	// Load all custom CA files.
+	for _, fi := range fis {
+		// Skip all directories.
+		if hasSuffix(fi, slashSeparator) {
+			continue
+		}
+		caCert, err := ioutil.ReadFile(pathJoin(certsCAsDir, fi))
+		if err != nil {
+			return rootCAs, err
+		}
+		rootCAs.AppendCertsFromPEM(caCert)
+	}
	return rootCAs, nil
}
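
A sketch of how a pool built this way is typically consumed: append a PEM file onto the system pool and hand it to a TLS client. The CA file path below is an illustrative assumption.

package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Start from the system pool when available, as getRootCAs does.
	rootCAs, _ := x509.SystemCertPool()
	if rootCAs == nil {
		rootCAs = x509.NewCertPool() // e.g. on Windows
	}

	// The CA path is illustrative; getRootCAs reads every regular file
	// under the certs CAs directory instead of one fixed path.
	pem, err := ioutil.ReadFile("/root/.minio/certs/CAs/myCA.crt")
	if err != nil {
		log.Fatal(err)
	}
	rootCAs.AppendCertsFromPEM(pem)

	client := &http.Client{
		Transport: &http.Transport{TLSClientConfig: &tls.Config{RootCAs: rootCAs}},
	}
	_ = client // ready for requests against servers signed by myCA
}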

@@ -150,24 +142,20 @@ func loadX509KeyPair(certFile, keyFile string) (tls.Certificate, error) {
	return cert, nil
}

-func getSSLConfig() (x509Certs []*x509.Certificate, rootCAs *x509.CertPool, c *certs.Certs, secureConn bool, err error) {
+func getTLSConfig() (x509Certs []*x509.Certificate, c *certs.Certs, secureConn bool, err error) {
	if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
-		return nil, nil, nil, false, nil
+		return nil, nil, false, nil
	}

	if x509Certs, err = parsePublicCertFile(getPublicCertFile()); err != nil {
-		return nil, nil, nil, false, err
+		return nil, nil, false, err
	}

	c, err = certs.New(getPublicCertFile(), getPrivateKeyFile(), loadX509KeyPair)
	if err != nil {
-		return nil, nil, nil, false, err
+		return nil, nil, false, err
	}

-	if rootCAs, err = getRootCAs(getCADir()); err != nil {
-		return nil, nil, nil, false, err
-	}
-
	secureConn = true
-	return x509Certs, rootCAs, c, secureConn, nil
+	return x509Certs, c, secureConn, nil
}

@@ -223,7 +223,8 @@ func TestGetRootCAs(t *testing.T) {
	certCAsDir string
	expectedErr error
}{
-	{"nonexistent-dir", errFileNotFound},
+	// ignores non-existent directories.
+	{"nonexistent-dir", nil},
	// Ignores directories.
	{dir1, nil},
	// Ignore empty directory.

@@ -17,7 +17,7 @@
package cmd

import (
-	"context"
+	"crypto/tls"
	"errors"
	"net"
	"os"
@@ -26,14 +26,16 @@ import (
	"strings"
	"time"

-	etcd "github.com/coreos/etcd/client"
+	etcd "github.com/coreos/etcd/clientv3"
+	dns2 "github.com/miekg/dns"
	"github.com/minio/cli"
+	"github.com/minio/minio-go/pkg/set"
	"github.com/minio/minio/cmd/logger"
+	"github.com/minio/minio/cmd/logger/target/console"
+	"github.com/minio/minio/cmd/logger/target/http"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/dns"

-	"github.com/minio/minio-go/pkg/set"
	xnet "github.com/minio/minio/pkg/net"
)

// Check for updates and print a notification message
@@ -48,74 +50,136 @@ func checkUpdate(mode string) {
	}
}

-// Initialize and load config from remote etcd or local config directory
-func initConfig() {
-	if globalEtcdClient != nil {
-		kapi := etcd.NewKeysAPI(globalEtcdClient)
-		ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
-		_, err := kapi.Get(ctx, getConfigFile(), nil)
-		cancel()
-		if err == nil {
-			logger.FatalIf(migrateConfig(), "Config migration failed.")
-			logger.FatalIf(loadConfig(), "Unable to load config version: '%s'.", serverConfigVersion)
-		} else {
-			if etcd.IsKeyNotFound(err) {
-				logger.FatalIf(newConfig(), "Unable to initialize minio config for the first time.")
-				logger.Info("Created minio configuration file successfully at %v", globalEtcdClient.Endpoints())
-			} else {
-				logger.FatalIf(err, "Unable to load config version: '%s'.", serverConfigVersion)
-			}
-		}
-		return
+// Load logger targets based on user's configuration
+func loadLoggers() {
+	auditEndpoint, ok := os.LookupEnv("MINIO_AUDIT_LOGGER_HTTP_ENDPOINT")
+	if ok {
+		// Enable audit HTTP logging through ENV.
+		logger.AddAuditTarget(http.New(auditEndpoint, NewCustomHTTPTransport()))
	}

-	if isFile(getConfigFile()) {
-		logger.FatalIf(migrateConfig(), "Config migration failed")
-		logger.FatalIf(loadConfig(), "Unable to load the configuration file")
+	loggerEndpoint, ok := os.LookupEnv("MINIO_LOGGER_HTTP_ENDPOINT")
+	if ok {
+		// Enable HTTP logging through ENV.
+		logger.AddTarget(http.New(loggerEndpoint, NewCustomHTTPTransport()))
	} else {
-		// Config file does not exist, we create it fresh and return upon success.
-		logger.FatalIf(newConfig(), "Unable to initialize minio config for the first time")
-		logger.Info("Created minio configuration file successfully at " + getConfigDir())
+		for _, l := range globalServerConfig.Logger.HTTP {
+			if l.Enabled {
+				// Enable http logging
+				logger.AddTarget(http.New(l.Endpoint, NewCustomHTTPTransport()))
+			}
+		}
	}

+	if globalServerConfig.Logger.Console.Enabled {
+		// Enable console logging
+		logger.AddTarget(console.New())
+	}

}
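
loadLoggers reads the two environment variables below; exporting them before startup is how the HTTP log and audit sinks get enabled. The endpoints here are illustrative.

package main

import (
	"fmt"
	"os"
)

func main() {
	// Endpoints are illustrative; loadLoggers looks these names up verbatim.
	os.Setenv("MINIO_LOGGER_HTTP_ENDPOINT", "http://logsink.example.net:8080/minio/log")
	os.Setenv("MINIO_AUDIT_LOGGER_HTTP_ENDPOINT", "http://auditsink.example.net:8080/minio/audit")

	for _, k := range []string{"MINIO_LOGGER_HTTP_ENDPOINT", "MINIO_AUDIT_LOGGER_HTTP_ENDPOINT"} {
		fmt.Printf("%s=%s\n", k, os.Getenv(k))
	}
}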

+func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() string) (*ConfigDir, bool) {
+	var dir string
+	var dirSet bool
+
+	switch {
+	case ctx.IsSet(option):
+		dir = ctx.String(option)
+		dirSet = true
+	case ctx.GlobalIsSet(option):
+		dir = ctx.GlobalString(option)
+		dirSet = true
+		// cli package does not expose parent's option. Below code is workaround.
+		if dir == "" || dir == getDefaultDir() {
+			dirSet = false // Unset to false since GlobalIsSet() true is a false positive.
+			if ctx.Parent().GlobalIsSet(option) {
+				dir = ctx.Parent().GlobalString(option)
+				dirSet = true
+			}
+		}
+	default:
+		// Neither local nor global option is provided. In this case, try to use
+		// default directory.
+		dir = getDefaultDir()
+		if dir == "" {
+			logger.FatalIf(errInvalidArgument, "%s option must be provided", option)
+		}
+	}
+
+	if dir == "" {
+		logger.FatalIf(errors.New("empty directory"), "%s directory cannot be empty", option)
+	}
+
+	// Disallow relative paths, figure out absolute paths.
+	dirAbs, err := filepath.Abs(dir)
+	logger.FatalIf(err, "Unable to fetch absolute path for %s=%s", option, dir)
+
+	logger.FatalIf(mkdirAllIgnorePerm(dirAbs), "Unable to create directory specified %s=%s", option, dir)
+
+	return &ConfigDir{path: dirAbs}, dirSet
+}

func handleCommonCmdArgs(ctx *cli.Context) {

-	var configDir string
-
-	if ctx.IsSet("config-dir") {
-		configDir = ctx.String("config-dir")
-	} else if ctx.GlobalIsSet("config-dir") {
-		configDir = ctx.GlobalString("config-dir")
-		// cli package does not expose parent's "config-dir" option. Below code is workaround.
-		if configDir == "" || configDir == getConfigDir() {
-			if ctx.Parent().GlobalIsSet("config-dir") {
-				configDir = ctx.Parent().GlobalString("config-dir")
-			}
-		}
-	} else {
-		// Neither local nor global config-dir option is provided. In this case, try to use
-		// default config directory.
-		configDir = getConfigDir()
-		if configDir == "" {
-			logger.FatalIf(errors.New("missing option"), "config-dir option must be provided")
-		}
-	}
+	// Get "json" flag from command line argument and
+	// enable json and quiet modes if json flag is turned on.
+	globalCLIContext.JSON = ctx.IsSet("json") || ctx.GlobalIsSet("json")
+	if globalCLIContext.JSON {
+		logger.EnableJSON()
+	}

-	if configDir == "" {
-		logger.FatalIf(errors.New("empty directory"), "Configuration directory cannot be empty")
+	// Get quiet flag from command line argument.
+	globalCLIContext.Quiet = ctx.IsSet("quiet") || ctx.GlobalIsSet("quiet")
+	if globalCLIContext.Quiet {
+		logger.EnableQuiet()
	}

-	// Disallow relative paths, figure out absolute paths.
-	configDirAbs, err := filepath.Abs(configDir)
-	logger.FatalIf(err, "Unable to fetch absolute path for config directory %s", configDir)
-	setConfigDir(configDirAbs)
+	// Get anonymous flag from command line argument.
+	globalCLIContext.Anonymous = ctx.IsSet("anonymous") || ctx.GlobalIsSet("anonymous")
+	if globalCLIContext.Anonymous {
+		logger.EnableAnonymous()
+	}
+
+	// Fetch address option
+	globalCLIContext.Addr = ctx.GlobalString("address")
+	if globalCLIContext.Addr == "" || globalCLIContext.Addr == ":"+globalMinioDefaultPort {
+		globalCLIContext.Addr = ctx.String("address")
+	}
+
+	// Set all config, certs and CAs directories.
+	var configSet, certsSet bool
+	globalConfigDir, configSet = newConfigDirFromCtx(ctx, "config-dir", defaultConfigDir.Get)
+	globalCertsDir, certsSet = newConfigDirFromCtx(ctx, "certs-dir", defaultCertsDir.Get)
+
+	// Remove this code when we deprecate and remove config-dir.
+	// This code is to make sure we inherit from the config-dir
+	// option if certs-dir is not provided.
+	if !certsSet && configSet {
+		globalCertsDir = &ConfigDir{path: filepath.Join(globalConfigDir.Get(), certsDir)}
+	}
+
+	globalCertsCADir = &ConfigDir{path: filepath.Join(globalCertsDir.Get(), certsCADir)}
+
+	logger.FatalIf(mkdirAllIgnorePerm(globalCertsCADir.Get()), "Unable to create certs CA directory at %s", globalCertsCADir.Get())
}

+// Parses the given compression include list `extensions` or `content-types`.
+func parseCompressIncludes(includes []string) ([]string, error) {
+	for _, e := range includes {
+		if len(e) == 0 {
+			return nil, uiErrInvalidCompressionIncludesValue(nil).Msg("extension/mime-type (%s) cannot be empty", e)
+		}
+	}
+	return includes, nil
+}
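
handleCommonEnvVars (next hunk) feeds this function with comma-split environment values. A self-contained sketch of that flow; the simplified error construction stands in for uiErrInvalidCompressionIncludesValue.

package main

import (
	"fmt"
	"strings"
)

// Simplified stand-in for parseCompressIncludes above: every entry split
// from the env value must be non-empty.
func parseCompressIncludes(includes []string) ([]string, error) {
	for _, e := range includes {
		if len(e) == 0 {
			return nil, fmt.Errorf("extension/mime-type (%s) cannot be empty", e)
		}
	}
	return includes, nil
}

func main() {
	// e.g. MINIO_COMPRESS_EXTENSIONS=".txt,.log,.csv"
	exts, err := parseCompressIncludes(strings.Split(".txt,.log,.csv", ","))
	fmt.Println(exts, err) // [.txt .log .csv] <nil>
}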

func handleCommonEnvVars() {
+	compressEnvDelimiter := ","
	// Start profiler if env is set.
	if profiler := os.Getenv("_MINIO_PROFILER"); profiler != "" {
-		globalProfiler = startProfiler(profiler)
+		var err error
+		globalProfiler, err = startProfiler(profiler, "")
+		logger.FatalIf(err, "Unable to setup a profiler")
	}

	accessKey := os.Getenv("MINIO_ACCESS_KEY")
@@ -125,6 +189,7 @@ func handleCommonEnvVars() {
	if err != nil {
		logger.Fatal(uiErrInvalidCredentials(err), "Unable to validate credentials inherited from the shell environment")
	}
+	cred.Expiration = timeSentinel

	// credential Envs are set globally.
	globalIsEnvCreds = true
@@ -134,7 +199,7 @@ func handleCommonEnvVars() {
	if browser := os.Getenv("MINIO_BROWSER"); browser != "" {
		browserFlag, err := ParseBoolFlag(browser)
		if err != nil {
-			logger.Fatal(uiErrInvalidBrowserValue(nil).Msg("Unknown value `%s`", browser), "Unable to validate MINIO_BROWSER environment variable")
+			logger.Fatal(uiErrInvalidBrowserValue(nil).Msg("Unknown value `%s`", browser), "Invalid MINIO_BROWSER value in environment variable")
		}

		// browser Envs are set globally, this does not represent
@@ -153,27 +218,82 @@ func handleCommonEnvVars() {
	etcdEndpointsEnv, ok := os.LookupEnv("MINIO_ETCD_ENDPOINTS")
	if ok {
		etcdEndpoints := strings.Split(etcdEndpointsEnv, ",")
+
+		var etcdSecure bool
+		for _, endpoint := range etcdEndpoints {
+			u, err := xnet.ParseURL(endpoint)
+			if err != nil {
+				logger.FatalIf(err, "Unable to initialize etcd with %s", etcdEndpoints)
+			}
+			// If one of the endpoints is https, we will use https directly.
+			etcdSecure = etcdSecure || u.Scheme == "https"
+		}

		var err error
-		globalEtcdClient, err = etcd.New(etcd.Config{
-			Endpoints: etcdEndpoints,
-			Transport: NewCustomHTTPTransport(),
-		})
+		if etcdSecure {
+			// This is only to support client side certificate authentication
+			// https://coreos.com/etcd/docs/latest/op-guide/security.html
+			etcdClientCertFile, ok1 := os.LookupEnv("MINIO_ETCD_CLIENT_CERT")
+			etcdClientCertKey, ok2 := os.LookupEnv("MINIO_ETCD_CLIENT_CERT_KEY")
+			var getClientCertificate func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
+			if ok1 && ok2 {
+				getClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+					cert, terr := tls.LoadX509KeyPair(etcdClientCertFile, etcdClientCertKey)
+					return &cert, terr
+				}
+			}
+
+			globalEtcdClient, err = etcd.New(etcd.Config{
+				Endpoints:         etcdEndpoints,
+				DialTimeout:       defaultDialTimeout,
+				DialKeepAliveTime: defaultDialKeepAlive,
+				TLS: &tls.Config{
+					RootCAs:              globalRootCAs,
+					GetClientCertificate: getClientCertificate,
+				},
+			})
+		} else {
+			globalEtcdClient, err = etcd.New(etcd.Config{
+				Endpoints:         etcdEndpoints,
+				DialTimeout:       defaultDialTimeout,
+				DialKeepAliveTime: defaultDialKeepAlive,
+			})
+		}
		logger.FatalIf(err, "Unable to initialize etcd with %s", etcdEndpoints)
	}
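
The move from the etcd v2 keys API to clientv3 changes how the client is constructed and queried. A minimal standalone clientv3 sketch; the endpoint and key are illustrative.

package main

import (
	"context"
	"fmt"
	"time"

	etcd "github.com/coreos/etcd/clientv3"
)

func main() {
	// Endpoint is illustrative. Unlike the v2 keys API, clientv3 dials
	// lazily; DialTimeout bounds the initial connection attempt.
	cli, err := etcd.New(etcd.Config{
		Endpoints:   []string{"http://localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	resp, err := cli.Get(ctx, "config/config.json")
	if err != nil {
		panic(err)
	}
	fmt.Println("keys found:", resp.Count)
}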

+	globalDomainName, globalIsEnvDomainName = os.LookupEnv("MINIO_DOMAIN")
+	if globalDomainName != "" {
+		if _, ok = dns2.IsDomainName(globalDomainName); !ok {
+			logger.Fatal(uiErrInvalidDomainValue(nil).Msg("Unknown value `%s`", globalDomainName), "Invalid MINIO_DOMAIN value in environment variable")
+		}
+	}

	minioEndpointsEnv, ok := os.LookupEnv("MINIO_PUBLIC_IPS")
	if ok {
		minioEndpoints := strings.Split(minioEndpointsEnv, ",")
-		globalDomainIPs = set.NewStringSet()
-		for i, ip := range minioEndpoints {
-			if net.ParseIP(ip) == nil {
-				logger.FatalIf(errInvalidArgument, "Unable to initialize Minio server with invalid MINIO_PUBLIC_IPS[%d]: %s", i, ip)
-			}
-			globalDomainIPs.Add(ip)
+		var domainIPs = set.NewStringSet()
+		for _, endpoint := range minioEndpoints {
+			if net.ParseIP(endpoint) == nil {
+				// Checking if the IP is a DNS entry.
+				addrs, err := net.LookupHost(endpoint)
+				if err != nil {
+					logger.FatalIf(err, "Unable to initialize Minio server with [%s] invalid entry found in MINIO_PUBLIC_IPS", endpoint)
+				}
+				for _, addr := range addrs {
+					domainIPs.Add(addr)
+				}
+				continue
+			}
+			domainIPs.Add(endpoint)
		}
+		updateDomainIPs(domainIPs)
+	} else {
+		// Add found interfaces IP address to global domain IPS,
+		// loopback addresses will be naturally dropped.
+		updateDomainIPs(localIP4)
	}

	if globalDomainName != "" && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil {
		var err error
		globalDNSConfig, err = dns.NewCoreDNS(globalDomainName, globalDomainIPs, globalMinioPort, globalEtcdClient)
@@ -254,7 +374,7 @@ func handleCommonEnvVars() {
	if worm := os.Getenv("MINIO_WORM"); worm != "" {
		wormFlag, err := ParseBoolFlag(worm)
		if err != nil {
-			logger.Fatal(uiErrInvalidWormValue(nil).Msg("Unknown value `%s`", worm), "Unable to validate MINIO_WORM environment variable")
+			logger.Fatal(uiErrInvalidWormValue(nil).Msg("Unknown value `%s`", worm), "Invalid MINIO_WORM value in environment variable")
		}

		// worm Envs are set globally, this does not represent
@@ -262,4 +382,28 @@ func handleCommonEnvVars() {
		globalIsEnvWORM = true
		globalWORMEnabled = bool(wormFlag)
	}
+
+	if compress := os.Getenv("MINIO_COMPRESS"); compress != "" {
+		globalIsCompressionEnabled = strings.EqualFold(compress, "true")
+	}
+
+	compressExtensions := os.Getenv("MINIO_COMPRESS_EXTENSIONS")
+	compressMimeTypes := os.Getenv("MINIO_COMPRESS_MIMETYPES")
+	if compressExtensions != "" || compressMimeTypes != "" {
+		globalIsEnvCompression = true
+		if compressExtensions != "" {
+			extensions, err := parseCompressIncludes(strings.Split(compressExtensions, compressEnvDelimiter))
+			if err != nil {
+				logger.Fatal(err, "Invalid MINIO_COMPRESS_EXTENSIONS value (`%s`)", extensions)
+			}
+			globalCompressExtensions = extensions
+		}
+		if compressMimeTypes != "" {
+			contenttypes, err := parseCompressIncludes(strings.Split(compressMimeTypes, compressEnvDelimiter))
+			if err != nil {
+				logger.Fatal(err, "Invalid MINIO_COMPRESS_MIMETYPES value (`%s`)", contenttypes)
+			}
+			globalCompressMimeTypes = contenttypes
+		}
+	}
}

cmd/config-common.go (new file, 151 lines)
@@ -0,0 +1,151 @@
+/*
+ * Minio Cloud Storage, (C) 2018 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package cmd
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+
+	etcd "github.com/coreos/etcd/clientv3"
+	"github.com/minio/minio/cmd/logger"
+	"github.com/minio/minio/pkg/hash"
+)
+
+var errConfigNotFound = errors.New("config file not found")
+
+func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]byte, error) {
+	var buffer bytes.Buffer
+	// Read entire content by setting size to -1
+	if err := objAPI.GetObject(ctx, minioMetaBucket, configFile, 0, -1, &buffer, "", ObjectOptions{}); err != nil {
+		// Treat object not found as config not found.
+		if isErrObjectNotFound(err) {
+			return nil, errConfigNotFound
+		}
+
+		logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
+		logger.LogIf(ctx, err)
+		return nil, err
+	}
+
+	// Return config not found on empty content.
+	if buffer.Len() == 0 {
+		return nil, errConfigNotFound
+	}
+
+	return buffer.Bytes(), nil
+}
+
+func deleteConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) error {
+	_, err := client.Delete(ctx, configFile)
+	return err
+}
+
+func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
+	return objAPI.DeleteObject(ctx, minioMetaBucket, configFile)
+}
+
+func saveConfigEtcd(ctx context.Context, client *etcd.Client, configFile string, data []byte) error {
+	timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
+	defer cancel()
+	_, err := client.Put(timeoutCtx, configFile, string(data))
+	if err == context.DeadlineExceeded {
+		return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
+	} else if err != nil {
+		return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
+	}
+	return nil
+}
+
+func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
+	hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
+	if err != nil {
+		return err
+	}
+
+	_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader, nil, nil), nil, ObjectOptions{})
+	return err
+}
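
readConfig treats both a missing object and an empty payload as errConfigNotFound, so callers can distinguish "create a fresh config" from a real failure. A self-contained sketch of that sentinel pattern; the in-memory map merely stands in for the object layer.

package main

import (
	"errors"
	"fmt"
)

var errConfigNotFound = errors.New("config file not found")

// readFrom mimics readConfig above: a missing key or an empty payload both
// surface as errConfigNotFound. The map stands in for the object layer.
func readFrom(store map[string][]byte, configFile string) ([]byte, error) {
	buf, ok := store[configFile]
	if !ok || len(buf) == 0 {
		return nil, errConfigNotFound
	}
	return buf, nil
}

func main() {
	store := map[string][]byte{"config/config.json": []byte(`{"version":"33"}`)}
	if _, err := readFrom(store, "config/missing.json"); err == errConfigNotFound {
		fmt.Println("no config yet: create a default one")
	}
	b, _ := readFrom(store, "config/config.json")
	fmt.Printf("%s\n", b)
}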
+
+func readConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) ([]byte, error) {
+	timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
+	defer cancel()
+	resp, err := client.Get(timeoutCtx, configFile)
+	if err != nil {
+		if err == context.DeadlineExceeded {
+			return nil, fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
+		}
+		return nil, fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
+	}
+	if resp.Count == 0 {
+		return nil, errConfigNotFound
+	}
+	for _, ev := range resp.Kvs {
+		if string(ev.Key) == configFile {
+			return ev.Value, nil
+		}
+	}
+	return nil, errConfigNotFound
+}
+
+// watchConfigEtcd - watches for changes on `configFile` on etcd and loads them.
+func watchConfigEtcd(objAPI ObjectLayer, configFile string, loadCfgFn func(ObjectLayer) error) {
+	ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
+	defer cancel()
+	for watchResp := range globalEtcdClient.Watch(ctx, configFile) {
+		for _, event := range watchResp.Events {
+			if event.IsModify() || event.IsCreate() {
+				loadCfgFn(objAPI)
+			}
+		}
+	}
+}
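
watchConfigEtcd blocks on the etcd watch channel, reacting to creates and modifications. A standalone clientv3 analog of the same loop; endpoint and key are illustrative.

package main

import (
	"context"
	"fmt"

	etcd "github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := etcd.New(etcd.Config{Endpoints: []string{"http://localhost:2379"}})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Same event filtering as watchConfigEtcd; a reload would happen where
	// the print statement is.
	for watchResp := range cli.Watch(context.Background(), "config/config.json") {
		for _, event := range watchResp.Events {
			if event.IsModify() || event.IsCreate() {
				fmt.Println("config changed on etcd; reload it")
			}
		}
	}
}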
+
+func checkConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) error {
+	timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
+	defer cancel()
+	resp, err := client.Get(timeoutCtx, configFile)
+	if err != nil {
+		if err == context.DeadlineExceeded {
+			return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
+		}
+		return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
+	}
+	if resp.Count == 0 {
+		return errConfigNotFound
+	}
+	return nil
+}
+
+func checkConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
+	if globalEtcdClient != nil {
+		return checkConfigEtcd(ctx, globalEtcdClient, configFile)
+	}
+
+	if _, err := objAPI.GetObjectInfo(ctx, minioMetaBucket, configFile, ObjectOptions{}); err != nil {
+		// Treat object not found as config not found.
+		if isErrObjectNotFound(err) {
+			return errConfigNotFound
+		}
+
+		logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
+		logger.LogIf(ctx, err)
+		return err
+	}
+	return nil
+}

@@ -17,17 +17,22 @@
package cmd

import (
	"context"
	"errors"
	"fmt"
	"os"
	"reflect"
	"sync"

+	"github.com/minio/minio/cmd/crypto"
+	xhttp "github.com/minio/minio/cmd/http"
+	"github.com/minio/minio/cmd/logger"
+
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/event/target"
-	"github.com/minio/minio/pkg/quick"
+	"github.com/minio/minio/pkg/iam/policy"
+	"github.com/minio/minio/pkg/iam/validator"
+	xnet "github.com/minio/minio/pkg/net"
)

// Steps to move from version N to version N+1
@@ -39,9 +44,9 @@ import (
// 6. Make changes in config-current_test.go for any test change

// Config version
-const serverConfigVersion = "26"
+const serverConfigVersion = "33"

-type serverConfig = serverConfigV26
+type serverConfig = serverConfigV33

var (
	// globalServerConfig server config.
@@ -62,11 +67,21 @@ func (s *serverConfig) SetRegion(region string) {

// GetRegion get current region.
func (s *serverConfig) GetRegion() string {
+	if globalIsEnvRegion {
+		return globalServerRegion
+	}
+	if s == nil {
+		return ""
+	}
	return s.Region
}

// SetCredential sets new credential and returns the previous credential.
func (s *serverConfig) SetCredential(creds auth.Credentials) (prevCred auth.Credentials) {
+	if creds.IsValid() && globalActiveCred.IsValid() {
+		globalActiveCred = creds
+	}
+
	// Save previous credential.
	prevCred = s.Credential

@@ -79,15 +94,12 @@ func (s *serverConfig) SetCredential(creds auth.Credentials) (prevCred auth.Cred

// GetCredential gets current credentials.
func (s *serverConfig) GetCredential() auth.Credentials {
+	if globalActiveCred.IsValid() {
+		return globalActiveCred
+	}
	return s.Credential
}

-// SetBrowser set if browser is enabled.
-func (s *serverConfig) SetBrowser(b bool) {
-	// Set the new value.
-	s.Browser = BoolFlag(b)
-}
-
// SetWorm set if worm is enabled.
func (s *serverConfig) SetWorm(b bool) {
	// Set the new value.
@@ -102,16 +114,23 @@ func (s *serverConfig) SetStorageClass(standardClass, rrsClass storageClass) {
// GetStorageClass reads storage class fields from current config.
// It returns the standard and reduced redundancy storage class struct
func (s *serverConfig) GetStorageClass() (storageClass, storageClass) {
+	if globalIsStorageClass {
+		return globalStandardStorageClass, globalRRStorageClass
+	}
+	if s == nil {
+		return storageClass{}, storageClass{}
+	}
	return s.StorageClass.Standard, s.StorageClass.RRS
}

// GetBrowser gets current browser setting.
func (s *serverConfig) GetBrowser() bool {
	return bool(s.Browser)
}

// GetWorm gets current worm setting.
func (s *serverConfig) GetWorm() bool {
+	if globalIsEnvWORM {
+		return globalWORMEnabled
+	}
+	if s == nil {
+		return false
+	}
	return bool(s.Worm)
}

@@ -125,22 +144,262 @@ func (s *serverConfig) SetCacheConfig(drives, exclude []string, expiry int, maxu

// GetCacheConfig gets the current cache config
func (s *serverConfig) GetCacheConfig() CacheConfig {
	if globalIsDiskCacheEnabled {
		return CacheConfig{
			Drives:  globalCacheDrives,
			Exclude: globalCacheExcludes,
			Expiry:  globalCacheExpiry,
			MaxUse:  globalCacheMaxUse,
		}
	}
+	if s == nil {
+		return CacheConfig{}
+	}
	return s.Cache
}

-// Save config file to corresponding backend
-func Save(configFile string, data interface{}) error {
-	return quick.SaveConfig(data, configFile, globalEtcdClient)
-}
+func (s *serverConfig) Validate() error {
+	if s == nil {
+		return nil
+	}
+	if s.Version != serverConfigVersion {
+		return fmt.Errorf("configuration version mismatch. Expected: ‘%s’, Got: ‘%s’", serverConfigVersion, s.Version)
+	}
+
+	// Validate credential fields only when
+	// they are not set via the environment.
+	// Error out if credentials are not set via environment and config has an invalid credential.
+	if !globalIsEnvCreds && !s.Credential.IsValid() {
+		return errors.New("invalid credential in config file")
+	}
+
+	// Region: nothing to validate
+	// Worm, Cache and StorageClass values are already validated during json unmarshal
+	for _, v := range s.Notify.AMQP {
+		if err := v.Validate(); err != nil {
+			return fmt.Errorf("amqp: %s", err)
+		}
+	}
+
+	for _, v := range s.Notify.Elasticsearch {
+		if err := v.Validate(); err != nil {
+			return fmt.Errorf("elasticsearch: %s", err)
+		}
+	}
+
+	for _, v := range s.Notify.Kafka {
+		if err := v.Validate(); err != nil {
+			return fmt.Errorf("kafka: %s", err)
+		}
+	}
+
+	for _, v := range s.Notify.MQTT {
+		if err := v.Validate(); err != nil {
+			return fmt.Errorf("mqtt: %s", err)
+		}
+	}
+
+	for _, v := range s.Notify.MySQL {
+		if err := v.Validate(); err != nil {
+			return fmt.Errorf("mysql: %s", err)
+		}
+	}
+
+	for _, v := range s.Notify.NATS {
+		if err := v.Validate(); err != nil {
+			return fmt.Errorf("nats: %s", err)
+		}
+	}
+
+	for _, v := range s.Notify.NSQ {
+		if err := v.Validate(); err != nil {
+			return fmt.Errorf("nsq: %s", err)
+		}
+	}
+
+	for _, v := range s.Notify.PostgreSQL {
+		if err := v.Validate(); err != nil {
+			return fmt.Errorf("postgreSQL: %s", err)
+		}
+	}
+
+	for _, v := range s.Notify.Redis {
+		if err := v.Validate(); err != nil {
+			return fmt.Errorf("redis: %s", err)
+		}
+	}
+
+	for _, v := range s.Notify.Webhook {
+		if err := v.Validate(); err != nil {
+			return fmt.Errorf("webhook: %s", err)
+		}
+	}
+
+	return nil
+}

-// Load config from backend
-func Load(configFile string, data interface{}) (quick.Config, error) {
-	return quick.LoadConfig(configFile, globalEtcdClient, data)
-}
+// SetCompressionConfig sets the current compression config
+func (s *serverConfig) SetCompressionConfig(extensions []string, mimeTypes []string) {
+	s.Compression.Extensions = extensions
+	s.Compression.MimeTypes = mimeTypes
+	s.Compression.Enabled = globalIsCompressionEnabled
+}

-// GetVersion gets config version from backend
-func GetVersion(configFile string) (string, error) {
-	return quick.GetVersion(configFile, globalEtcdClient)
-}
+// GetCompressionConfig gets the current compression config
+func (s *serverConfig) GetCompressionConfig() compressionConfig {
+	return s.Compression
+}

+func (s *serverConfig) loadFromEnvs() {
+	// If env is set override the credentials from config file.
+	if globalIsEnvCreds {
+		s.SetCredential(globalActiveCred)
+	}
+
+	if globalIsEnvWORM {
+		s.SetWorm(globalWORMEnabled)
+	}
+
+	if globalIsEnvRegion {
+		s.SetRegion(globalServerRegion)
+	}
+
+	if globalIsStorageClass {
+		s.SetStorageClass(globalStandardStorageClass, globalRRStorageClass)
+	}
+
+	if globalIsDiskCacheEnabled {
+		s.SetCacheConfig(globalCacheDrives, globalCacheExcludes, globalCacheExpiry, globalCacheMaxUse)
+	}
+
+	if err := Environment.LookupKMSConfig(s.KMS); err != nil {
+		logger.FatalIf(err, "Unable to setup the KMS")
+	}
+
+	if globalIsEnvCompression {
+		s.SetCompressionConfig(globalCompressExtensions, globalCompressMimeTypes)
+	}
+
+	if jwksURL, ok := os.LookupEnv("MINIO_IAM_JWKS_URL"); ok {
+		if u, err := xnet.ParseURL(jwksURL); err == nil {
+			s.OpenID.JWKS.URL = u
+			logger.FatalIf(s.OpenID.JWKS.PopulatePublicKey(), "Unable to populate public key from JWKS URL")
+		}
+	}
+
+	if opaURL, ok := os.LookupEnv("MINIO_IAM_OPA_URL"); ok {
+		if u, err := xnet.ParseURL(opaURL); err == nil {
+			s.Policy.OPA.URL = u
+			s.Policy.OPA.AuthToken = os.Getenv("MINIO_IAM_OPA_AUTHTOKEN")
+		}
+	}
+}

+// TestNotificationTargets tries to establish connections to all notification
+// targets when enabled. This is a good way to make sure all configurations
+// set by the user can work.
+func (s *serverConfig) TestNotificationTargets() error {
+	for k, v := range s.Notify.AMQP {
+		if !v.Enable {
+			continue
+		}
+		t, err := target.NewAMQPTarget(k, v)
+		if err != nil {
+			return fmt.Errorf("amqp(%s): %s", k, err.Error())
+		}
+		t.Close()
+	}
+
+	for k, v := range s.Notify.Elasticsearch {
+		if !v.Enable {
+			continue
+		}
+		t, err := target.NewElasticsearchTarget(k, v)
+		if err != nil {
+			return fmt.Errorf("elasticsearch(%s): %s", k, err.Error())
+		}
+		t.Close()
+	}
+
+	for k, v := range s.Notify.Kafka {
+		if !v.Enable {
+			continue
+		}
+		t, err := target.NewKafkaTarget(k, v)
+		if err != nil {
+			return fmt.Errorf("kafka(%s): %s", k, err.Error())
+		}
+		t.Close()
+	}
+
+	for k, v := range s.Notify.MQTT {
+		if !v.Enable {
+			continue
+		}
+		t, err := target.NewMQTTTarget(k, v)
+		if err != nil {
+			return fmt.Errorf("mqtt(%s): %s", k, err.Error())
+		}
+		t.Close()
+	}
+
+	for k, v := range s.Notify.MySQL {
+		if !v.Enable {
+			continue
+		}
+		t, err := target.NewMySQLTarget(k, v)
+		if err != nil {
+			return fmt.Errorf("mysql(%s): %s", k, err.Error())
+		}
+		t.Close()
+	}
+
+	for k, v := range s.Notify.NATS {
+		if !v.Enable {
+			continue
+		}
+		t, err := target.NewNATSTarget(k, v)
+		if err != nil {
+			return fmt.Errorf("nats(%s): %s", k, err.Error())
+		}
+		t.Close()
+	}
+
+	for k, v := range s.Notify.NSQ {
+		if !v.Enable {
+			continue
+		}
+		t, err := target.NewNSQTarget(k, v)
+		if err != nil {
+			return fmt.Errorf("nsq(%s): %s", k, err.Error())
+		}
+		t.Close()
+	}
+
+	for k, v := range s.Notify.PostgreSQL {
+		if !v.Enable {
+			continue
+		}
+		t, err := target.NewPostgreSQLTarget(k, v)
+		if err != nil {
+			return fmt.Errorf("postgreSQL(%s): %s", k, err.Error())
+		}
+		t.Close()
+	}
+
+	for k, v := range s.Notify.Redis {
+		if !v.Enable {
+			continue
+		}
+		t, err := target.NewRedisTarget(k, v)
+		if err != nil {
+			return fmt.Errorf("redis(%s): %s", k, err.Error())
+		}
+		t.Close()
+	}
+
+	return nil
+}
|
||||
|
||||
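Since every target is constructed and immediately closed, this check is cheap enough to run once before the server starts serving requests. A minimal call-site sketch, assuming an already-loaded global config (the wiring below is illustrative, not part of this change):

    // Hedged sketch: fail fast at startup if any enabled notification
    // target is unreachable. globalServerConfig is assumed to be loaded.
    if err := globalServerConfig.TestNotificationTargets(); err != nil {
        logger.FatalIf(err, "Unable to connect to notification target")
    }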
// Returns the string describing a difference with the given
@@ -154,18 +413,18 @@ func (s *serverConfig) ConfigDiff(t *serverConfig) string {
        return "Credential configuration differs"
    case s.Region != t.Region:
        return "Region configuration differs"
    case s.Browser != t.Browser:
        return "Browser configuration differs"
    case s.Domain != t.Domain:
        return "Domain configuration differs"
    case s.StorageClass != t.StorageClass:
        return "StorageClass configuration differs"
    case !reflect.DeepEqual(s.Cache, t.Cache):
        return "Cache configuration differs"
    case !reflect.DeepEqual(s.Compression, t.Compression):
        return "Compression configuration differs"
    case !reflect.DeepEqual(s.Notify.AMQP, t.Notify.AMQP):
        return "AMQP Notification configuration differs"
    case !reflect.DeepEqual(s.Notify.NATS, t.Notify.NATS):
        return "NATS Notification configuration differs"
    case !reflect.DeepEqual(s.Notify.NSQ, t.Notify.NSQ):
        return "NSQ Notification configuration differs"
    case !reflect.DeepEqual(s.Notify.Elasticsearch, t.Notify.Elasticsearch):
        return "ElasticSearch Notification configuration differs"
    case !reflect.DeepEqual(s.Notify.Redis, t.Notify.Redis):
@@ -180,6 +439,10 @@ func (s *serverConfig) ConfigDiff(t *serverConfig) string {
        return "MySQL Notification configuration differs"
    case !reflect.DeepEqual(s.Notify.MQTT, t.Notify.MQTT):
        return "MQTT Notification configuration differs"
    case !reflect.DeepEqual(s.Logger, t.Logger):
        return "Logger configuration differs"
    case !reflect.DeepEqual(s.KMS, t.KMS):
        return "KMS configuration differs"
    case reflect.DeepEqual(s, t):
        return ""
    default:
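ConfigDiff reports the first mismatching field as a human-readable string and returns an empty string when the two configs are deep-equal. A hedged usage sketch (newCfg is a hypothetical candidate config, not part of this change):

    // Hedged sketch: explain why an incoming config was rejected.
    // newCfg is hypothetical and only illustrates the call shape.
    if diff := globalServerConfig.ConfigDiff(newCfg); diff != "" {
        logger.Info("Incoming config rejected: %s", diff)
    }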
@@ -197,7 +460,6 @@ func newServerConfig() *serverConfig {
        Version: serverConfigVersion,
        Credential: cred,
        Region: globalMinioDefaultRegion,
        Browser: true,
        StorageClass: storageClassConfig{
            Standard: storageClass{},
            RRS: storageClass{},
@@ -208,7 +470,13 @@ func newServerConfig() *serverConfig {
            Expiry: globalCacheExpiry,
            MaxUse: globalCacheMaxUse,
        },
        KMS: crypto.KMSConfig{},
        Notify: notifier{},
        Compression: compressionConfig{
            Enabled: false,
            Extensions: globalCompressExtensions,
            MimeTypes: globalCompressMimeTypes,
        },
    }

    // Make sure to initialize notification configs.
@@ -222,6 +490,8 @@ func newServerConfig() *serverConfig {
    srvCfg.Notify.Redis["1"] = target.RedisArgs{}
    srvCfg.Notify.NATS = make(map[string]target.NATSArgs)
    srvCfg.Notify.NATS["1"] = target.NATSArgs{}
    srvCfg.Notify.NSQ = make(map[string]target.NSQArgs)
    srvCfg.Notify.NSQ["1"] = target.NSQArgs{}
    srvCfg.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs)
    srvCfg.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{}
    srvCfg.Notify.MySQL = make(map[string]target.MySQLArgs)
@@ -235,184 +505,146 @@ func newServerConfig() *serverConfig {
    srvCfg.Cache.Exclude = make([]string, 0)
    srvCfg.Cache.Expiry = globalCacheExpiry
    srvCfg.Cache.MaxUse = globalCacheMaxUse

    // Console logging is on by default
    srvCfg.Logger.Console.Enabled = true
    // Create an example of HTTP logger
    srvCfg.Logger.HTTP = make(map[string]loggerHTTP)
    srvCfg.Logger.HTTP["target1"] = loggerHTTP{Endpoint: "https://username:password@example.com/api"}

    return srvCfg
}
// newConfig - initialize a new server config, saves env parameters if
// found, otherwise use default parameters
func newConfig() error {
    // Initialize server config.
    srvCfg, err := newQuickConfig(newServerConfig())
    if err != nil {
        return err
    }

    // If env is set override the credentials from config file.
    if globalIsEnvCreds {
        srvCfg.SetCredential(globalActiveCred)
    }

    if globalIsEnvBrowser {
        srvCfg.SetBrowser(globalIsBrowserEnabled)
    }

    if globalIsEnvWORM {
        srvCfg.SetWorm(globalWORMEnabled)
    }

    if globalIsEnvRegion {
        srvCfg.SetRegion(globalServerRegion)
    }

    if globalIsEnvDomainName {
        srvCfg.Domain = globalDomainName
    }

    if globalIsStorageClass {
        srvCfg.SetStorageClass(globalStandardStorageClass, globalRRStorageClass)
    }

    if globalIsDiskCacheEnabled {
        srvCfg.SetCacheConfig(globalCacheDrives, globalCacheExcludes, globalCacheExpiry, globalCacheMaxUse)
    }

    // hold the mutex lock before a new config is assigned.
    // Save the new config globally.
    // unlock the mutex.
    globalServerConfigMu.Lock()
    globalServerConfig = srvCfg
    globalServerConfigMu.Unlock()

    // Save config into file.
    return Save(getConfigFile(), globalServerConfig)
}
// newQuickConfig - initialize a new server config, with an allocated
// quick.Config interface.
func newQuickConfig(srvCfg *serverConfig) (*serverConfig, error) {
    qcfg, err := quick.NewConfig(srvCfg, globalEtcdClient)
    if err != nil {
        return nil, err
    }

    srvCfg.Config = qcfg
    return srvCfg, nil
}
// getValidConfig - returns valid server configuration
func getValidConfig() (*serverConfig, error) {
    srvCfg := &serverConfig{
        Region: globalMinioDefaultRegion,
        Browser: true,
    }

    var err error
    srvCfg, err = newQuickConfig(srvCfg)
    if err != nil {
        return nil, err
    }

    configFile := getConfigFile()
    if err = srvCfg.Load(configFile); err != nil {
        return nil, err
    }

    if srvCfg.Version != serverConfigVersion {
        return nil, fmt.Errorf("configuration version mismatch. Expected: ‘%s’, Got: ‘%s’", serverConfigVersion, srvCfg.Version)
    }

    // Validate credential fields only when
    // they are not set via the environment
    // Error out if global is env credential is not set and config has invalid credential
    if !globalIsEnvCreds && !srvCfg.Credential.IsValid() {
        return nil, errors.New("invalid credential in config file " + getConfigFile())
    }

    return srvCfg, nil
}
// loadConfig - loads a new config from disk, overrides params from env
// if found and valid
func loadConfig() error {
    srvCfg, err := getValidConfig()
    if err != nil {
        return uiErrInvalidConfig(nil).Msg(err.Error())
    }

    // If env is set override the credentials from config file.
    if globalIsEnvCreds {
        srvCfg.SetCredential(globalActiveCred)
    }

    if globalIsEnvBrowser {
        srvCfg.SetBrowser(globalIsBrowserEnabled)
    }

    if globalIsEnvRegion {
        srvCfg.SetRegion(globalServerRegion)
    }

    if globalIsEnvDomainName {
        srvCfg.Domain = globalDomainName
    }

    if globalIsStorageClass {
        srvCfg.SetStorageClass(globalStandardStorageClass, globalRRStorageClass)
    }

    if globalIsDiskCacheEnabled {
        srvCfg.SetCacheConfig(globalCacheDrives, globalCacheExcludes, globalCacheExpiry, globalCacheMaxUse)
    }

    // hold the mutex lock before a new config is assigned.
    globalServerConfigMu.Lock()
    globalServerConfig = srvCfg
func (s *serverConfig) loadToCachedConfigs() {
    if !globalIsEnvCreds {
        globalActiveCred = globalServerConfig.GetCredential()
    }
    if !globalIsEnvBrowser {
        globalIsBrowserEnabled = globalServerConfig.GetBrowser()
        globalActiveCred = s.GetCredential()
    }
    if !globalIsEnvWORM {
        globalWORMEnabled = globalServerConfig.GetWorm()
        globalWORMEnabled = s.GetWorm()
    }
    if !globalIsEnvRegion {
        globalServerRegion = globalServerConfig.GetRegion()
    }
    if !globalIsEnvDomainName {
        globalDomainName = globalServerConfig.Domain
        globalServerRegion = s.GetRegion()
    }
    if !globalIsStorageClass {
        globalStandardStorageClass, globalRRStorageClass = globalServerConfig.GetStorageClass()
        globalStandardStorageClass, globalRRStorageClass = s.GetStorageClass()
    }
    if !globalIsDiskCacheEnabled {
        cacheConf := globalServerConfig.GetCacheConfig()
        cacheConf := s.GetCacheConfig()
        globalCacheDrives = cacheConf.Drives
        globalCacheExcludes = cacheConf.Exclude
        globalCacheExpiry = cacheConf.Expiry
        globalCacheMaxUse = cacheConf.MaxUse
    }
    if err := Environment.LookupKMSConfig(s.KMS); err != nil {
        logger.FatalIf(err, "Unable to setup the KMS")
    }

    if !globalIsCompressionEnabled {
        compressionConf := s.GetCompressionConfig()
        globalCompressExtensions = compressionConf.Extensions
        globalCompressMimeTypes = compressionConf.MimeTypes
        globalIsCompressionEnabled = compressionConf.Enabled
    }

    globalIAMValidators = getAuthValidators(s)

    if s.Policy.OPA.URL != nil && s.Policy.OPA.URL.String() != "" {
        globalPolicyOPA = iampolicy.NewOpa(iampolicy.OpaArgs{
            URL: s.Policy.OPA.URL,
            AuthToken: s.Policy.OPA.AuthToken,
            Transport: NewCustomHTTPTransport(),
            CloseRespFn: xhttp.DrainBody,
        })
    }
}
// newSrvConfig - initialize a new server config, saves env parameters if
// found, otherwise use default parameters
func newSrvConfig(objAPI ObjectLayer) error {
    // Initialize server config.
    srvCfg := newServerConfig()

    // Override any values from ENVs.
    srvCfg.loadFromEnvs()

    // Load values to cached global values.
    srvCfg.loadToCachedConfigs()

    // hold the mutex lock before a new config is assigned.
    globalServerConfigMu.Lock()
    globalServerConfig = srvCfg
    globalServerConfigMu.Unlock()

    // Save config into file.
    return saveServerConfig(context.Background(), objAPI, globalServerConfig)
}

// getValidConfig - returns valid server configuration
func getValidConfig(objAPI ObjectLayer) (*serverConfig, error) {
    srvCfg, err := readServerConfig(context.Background(), objAPI)
    if err != nil {
        return nil, err
    }

    return srvCfg, srvCfg.Validate()
}

// loadConfig - loads a new config from disk, overrides params from env
// if found and valid
func loadConfig(objAPI ObjectLayer) error {
    srvCfg, err := getValidConfig(objAPI)
    if err != nil {
        return uiErrInvalidConfig(nil).Msg(err.Error())
    }

    // Override any values from ENVs.
    srvCfg.loadFromEnvs()

    // Load values to cached global values.
    srvCfg.loadToCachedConfigs()

    // hold the mutex lock before a new config is assigned.
    globalServerConfigMu.Lock()
    globalServerConfig = srvCfg
    globalServerConfigMu.Unlock()

    return nil
}
// getAuthValidators - returns ValidatorList which contains
// enabled providers in server config.
// A new authentication provider is added like below
// * Add a new provider in pkg/iam/validator package.
func getAuthValidators(config *serverConfig) *validator.Validators {
    validators := validator.NewValidators()

    if config.OpenID.JWKS.URL != nil {
        validators.Add(validator.NewJWT(config.OpenID.JWKS))
    }

    return validators
}
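Following the steps in the comment above, another provider only needs its own guarded Add call; a hedged sketch (the Custom provider below is hypothetical and not part of this change):

    // Hedged sketch: a hypothetical second provider added to the list.
    // validator.NewCustomProvider and config.Custom do not exist here;
    // they only illustrate the extension point described above.
    // if config.Custom.Enabled {
    //     validators.Add(validator.NewCustomProvider(config.Custom))
    // }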
// getNotificationTargets - returns TargetList which contains enabled targets in serverConfig.
// A new notification target is added like below
// * Add a new target in pkg/event/target package.
// * Add newly added target configuration to serverConfig.Notify.<TARGET_NAME>.
// * Handle the configuration in this function to create/add into TargetList.
func getNotificationTargets(config *serverConfig) (*event.TargetList, error) {
func getNotificationTargets(config *serverConfig) *event.TargetList {
    targetList := event.NewTargetList()

    if config == nil {
        return targetList
    }
    for id, args := range config.Notify.AMQP {
        if args.Enable {
            newTarget, err := target.NewAMQPTarget(id, args)
            if err != nil {
                return nil, err
                logger.LogIf(context.Background(), err)
                continue
            }
            if err = targetList.Add(newTarget); err != nil {
                return nil, err
                logger.LogIf(context.Background(), err)
                continue
            }
        }
    }
@@ -421,10 +653,14 @@ func getNotificationTargets(config *serverConfig) (*event.TargetList, error) {
        if args.Enable {
            newTarget, err := target.NewElasticsearchTarget(id, args)
            if err != nil {
                return nil, err
                logger.LogIf(context.Background(), err)
                continue
            }
            if err = targetList.Add(newTarget); err != nil {
                return nil, err
                logger.LogIf(context.Background(), err)
                continue
            }
        }
    }
@@ -433,22 +669,27 @@ func getNotificationTargets(config *serverConfig) (*event.TargetList, error) {
        if args.Enable {
            newTarget, err := target.NewKafkaTarget(id, args)
            if err != nil {
                return nil, err
                logger.LogIf(context.Background(), err)
                continue
            }
            if err = targetList.Add(newTarget); err != nil {
                return nil, err
                logger.LogIf(context.Background(), err)
                continue
            }
        }
    }

    for id, args := range config.Notify.MQTT {
        if args.Enable {
            args.RootCAs = globalRootCAs
            newTarget, err := target.NewMQTTTarget(id, args)
            if err != nil {
                return nil, err
                logger.LogIf(context.Background(), err)
                continue
            }
            if err = targetList.Add(newTarget); err != nil {
                return nil, err
                logger.LogIf(context.Background(), err)
                continue
            }
        }
    }
@@ -457,10 +698,12 @@ func getNotificationTargets(config *serverConfig) (*event.TargetList, error) {
        if args.Enable {
            newTarget, err := target.NewMySQLTarget(id, args)
            if err != nil {
                return nil, err
                logger.LogIf(context.Background(), err)
                continue
            }
            if err = targetList.Add(newTarget); err != nil {
                return nil, err
                logger.LogIf(context.Background(), err)
                continue
            }
        }
    }
@@ -469,10 +712,26 @@ func getNotificationTargets(config *serverConfig) (*event.TargetList, error) {
        if args.Enable {
            newTarget, err := target.NewNATSTarget(id, args)
            if err != nil {
                return nil, err
                logger.LogIf(context.Background(), err)
                continue
            }
            if err = targetList.Add(newTarget); err != nil {
                return nil, err
                logger.LogIf(context.Background(), err)
                continue
            }
        }
    }

    for id, args := range config.Notify.NSQ {
        if args.Enable {
            newTarget, err := target.NewNSQTarget(id, args)
            if err != nil {
                logger.LogIf(context.Background(), err)
                continue
            }
            if err = targetList.Add(newTarget); err != nil {
                logger.LogIf(context.Background(), err)
                continue
            }
        }
    }
@@ -481,10 +740,12 @@ func getNotificationTargets(config *serverConfig) (*event.TargetList, error) {
        if args.Enable {
            newTarget, err := target.NewPostgreSQLTarget(id, args)
            if err != nil {
                return nil, err
                logger.LogIf(context.Background(), err)
                continue
            }
            if err = targetList.Add(newTarget); err != nil {
                return nil, err
                logger.LogIf(context.Background(), err)
                continue
            }
        }
    }
@@ -493,22 +754,26 @@ func getNotificationTargets(config *serverConfig) (*event.TargetList, error) {
        if args.Enable {
            newTarget, err := target.NewRedisTarget(id, args)
            if err != nil {
                return nil, err
                logger.LogIf(context.Background(), err)
                continue
            }
            if err = targetList.Add(newTarget); err != nil {
                return nil, err
                logger.LogIf(context.Background(), err)
                continue
            }
        }
    }

    for id, args := range config.Notify.Webhook {
        if args.Enable {
            args.RootCAs = globalRootCAs
            newTarget := target.NewWebhookTarget(id, args)
            if err := targetList.Add(newTarget); err != nil {
                return nil, err
                logger.LogIf(context.Background(), err)
                continue
            }
        }
    }

    return targetList, nil
    return targetList
}
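Note the behavioral change across these hunks: target construction failures used to abort with return nil, err, while the new code logs the error and continues, so one misconfigured target no longer blocks startup. A new target added per the comment above would follow the same log-and-continue shape; a hedged sketch (the Foo target is hypothetical):

    // Hedged sketch: the log-and-continue shape for a hypothetical target.
    // target.NewFooTarget and config.Notify.Foo do not exist in this change.
    // for id, args := range config.Notify.Foo {
    //     if args.Enable {
    //         newTarget, err := target.NewFooTarget(id, args)
    //         if err != nil {
    //             logger.LogIf(context.Background(), err)
    //             continue
    //         }
    //         if err = targetList.Add(newTarget); err != nil {
    //             logger.LogIf(context.Background(), err)
    //             continue
    //         }
    //     }
    // }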
@@ -17,9 +17,9 @@
package cmd

import (
    "io/ioutil"
    "context"
    "os"
    "path/filepath"
    "path"
    "testing"

    "github.com/minio/minio/pkg/auth"
@@ -27,12 +27,15 @@ import (
)

func TestServerConfig(t *testing.T) {
    rootPath, err := newTestConfig(globalMinioDefaultRegion)
    objLayer, fsDir, err := prepareFS()
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(fsDir)

    if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
        t.Fatalf("Init Test config failed")
    }
    // remove the root directory after the test ends.
    defer os.RemoveAll(rootPath)

    if globalServerConfig.GetRegion() != globalMinioDefaultRegion {
        t.Errorf("Expecting region `us-east-1` found %s", globalServerConfig.GetRegion())
@@ -49,16 +52,12 @@ func TestServerConfig(t *testing.T) {
        t.Errorf("Expecting version %s found %s", globalServerConfig.GetVersion(), serverConfigVersion)
    }

    // Attempt to save.
    if err := globalServerConfig.Save(getConfigFile()); err != nil {
    if err := saveServerConfig(context.Background(), objLayer, globalServerConfig); err != nil {
        t.Fatalf("Unable to save updated config file %s", err)
    }

    // Do this only once here.
    setConfigDir(rootPath)

    // Initialize server config.
    if err := loadConfig(); err != nil {
    if err := loadConfig(objLayer); err != nil {
        t.Fatalf("Unable to initialize from updated config file %s", err)
    }
}
@@ -68,6 +67,9 @@ func TestServerConfigWithEnvs(t *testing.T) {
    os.Setenv("MINIO_BROWSER", "off")
    defer os.Unsetenv("MINIO_BROWSER")

    os.Setenv("MINIO_WORM", "on")
    defer os.Unsetenv("MINIO_WORM")

    os.Setenv("MINIO_ACCESS_KEY", "minio")
    defer os.Unsetenv("MINIO_ACCESS_KEY")

@@ -82,61 +84,70 @@ func TestServerConfigWithEnvs(t *testing.T) {

    defer resetGlobalIsEnvs()

    // Get test root.
    rootPath, err := getTestRoot()
    objLayer, fsDir, err := prepareFS()
    if err != nil {
        t.Error(err)
        t.Fatal(err)
    }
    defer os.RemoveAll(fsDir)

    if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
        t.Fatalf("Init Test config failed")
    }

    globalObjLayerMutex.Lock()
    globalObjectAPI = objLayer
    globalObjLayerMutex.Unlock()

    serverHandleEnvVars()

    // Do this only once here.
    setConfigDir(rootPath)

    // Init config
    initConfig()
    initConfig(objLayer)

    // remove the root directory after the test ends.
    defer os.RemoveAll(rootPath)

    // Check if serverConfig has
    if globalServerConfig.GetBrowser() {
        t.Errorf("Expecting browser is set to false found %v", globalServerConfig.GetBrowser())
    // Check if serverConfig has browser disabled
    if globalIsBrowserEnabled {
        t.Error("Expected browser to be disabled but it is not")
    }

    // Check if serverConfig has
    // Check if serverConfig returns WORM config from the env
    if !globalServerConfig.GetWorm() {
        t.Error("Expected WORM to be enabled but it is not")
    }

    // Check if serverConfig has region from the environment
    if globalServerConfig.GetRegion() != "us-west-1" {
        t.Errorf("Expecting region to be \"us-west-1\" found %v", globalServerConfig.GetRegion())
        t.Errorf("Expected region to be \"us-west-1\", found %v", globalServerConfig.GetRegion())
    }

    // Check if serverConfig has
    // Check if serverConfig has credentials from the environment
    cred := globalServerConfig.GetCredential()

    if cred.AccessKey != "minio" {
        t.Errorf("Expecting access key to be `minio` found %s", cred.AccessKey)
        t.Errorf("Expected access key to be `minio`, found %s", cred.AccessKey)
    }

    if cred.SecretKey != "minio123" {
        t.Errorf("Expecting access key to be `minio123` found %s", cred.SecretKey)
        t.Errorf("Expected access key to be `minio123`, found %s", cred.SecretKey)
    }

    if globalServerConfig.Domain != "domain.com" {
        t.Errorf("Expecting Domain to be `domain.com` found " + globalServerConfig.Domain)
    // Check if serverConfig has the correct domain
    if globalDomainName != "domain.com" {
        t.Errorf("Expected Domain to be `domain.com`, found " + globalDomainName)
    }
}
// Tests config validator..
func TestValidateConfig(t *testing.T) {
    rootPath, err := newTestConfig(globalMinioDefaultRegion)
    objLayer, fsDir, err := prepareFS()
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(fsDir)

    if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
        t.Fatalf("Init Test config failed")
    }

    // remove the root directory after the test ends.
    defer os.RemoveAll(rootPath)

    configPath := filepath.Join(rootPath, minioConfigFile)

    configPath := path.Join(minioConfigPrefix, minioConfigFile)
    v := serverConfigVersion

    testCases := []struct {
@@ -174,66 +185,69 @@ func TestValidateConfig(t *testing.T) {
        {`{"version": "` + v + `", "browser": "on", "browser": "on", "region":"us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, false},

        // Test 11 - Test AMQP
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, true},
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, false},

        // Test 12 - Test NATS
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "clientID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, true},
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},

        // Test 13 - Test ElasticSearch
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, true},
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, false},

        // Test 14 - Test Redis
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "address": "", "password": "", "key": "" } }}}`, true},
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "address": "", "password": "", "key": "" } }}}`, false},

        // Test 15 - Test PostgreSQL
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, true},
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},

        // Test 16 - Test Kafka
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "" } }}}`, true},
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "" } }}}`, false},

        // Test 17 - Test Webhook
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "" } }}}`, true},
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "" } }}}`, false},

        // Test 18 - Test MySQL
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, true},
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},

        // Test 19 - Test Format for MySQL
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "invalid", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "invalid", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false},

        // Test 20 - Test valid Format for MySQL
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "namespace", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},

        // Test 21 - Test Format for PostgreSQL
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "invalid", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "invalid", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false},

        // Test 22 - Test valid Format for PostgreSQL
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "namespace", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},

        // Test 23 - Test Format for ElasticSearch
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "invalid", "url": "example.com", "index": "myindex" } }}}`, true},
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "invalid", "url": "example.com", "index": "myindex" } }}}`, false},

        // Test 24 - Test valid Format for ElasticSearch
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "namespace", "url": "example.com", "index": "myindex" } }}}`, true},

        // Test 25 - Test Format for Redis
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true},
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, false},

        // Test 26 - Test valid Format for Redis
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "namespace", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true},

        // Test 27 - Test MQTT
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "clientId": "", "username": "", "password": ""}}}}`, true},
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "username": "", "password": "", "queueDir": ""}}}}`, false},

        // Test 28 - Test NSQ
        {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nsq": { "1": { "enable": true, "nsqdAddress": "", "topic": ""} }}}`, false},
    }

    for i, testCase := range testCases {
        if werr := ioutil.WriteFile(configPath, []byte(testCase.configData), 0700); werr != nil {
            t.Fatal(werr)
        if err = saveConfig(context.Background(), objLayer, configPath, []byte(testCase.configData)); err != nil {
            t.Fatal(err)
        }
        _, verr := getValidConfig()
        if testCase.shouldPass && verr != nil {
            t.Errorf("Test %d, should pass but it failed with err = %v", i+1, verr)
        _, err = getValidConfig(objLayer)
        if testCase.shouldPass && err != nil {
            t.Errorf("Test %d, should pass but it failed with err = %v", i+1, err)
        }
        if !testCase.shouldPass && verr == nil {
        if !testCase.shouldPass && err == nil {
            t.Errorf("Test %d, should fail but it succeeded.", i+1)
        }
    }
@@ -249,76 +263,98 @@ func TestConfigDiff(t *testing.T) {
        {&serverConfig{}, nil, "Given configuration is empty"},
        // 2
        {
            &serverConfig{Credential: auth.Credentials{"u1", "p1"}},
            &serverConfig{Credential: auth.Credentials{"u1", "p2"}},
            &serverConfig{Credential: auth.Credentials{
                AccessKey: "u1",
                SecretKey: "p1",
                Expiration: timeSentinel,
            }},
            &serverConfig{Credential: auth.Credentials{
                AccessKey: "u1",
                SecretKey: "p2",
                Expiration: timeSentinel,
            }},
            "Credential configuration differs",
        },
        // 3
        {&serverConfig{Region: "us-east-1"}, &serverConfig{Region: "us-west-1"}, "Region configuration differs"},
        // 4
        {&serverConfig{Browser: false}, &serverConfig{Browser: true}, "Browser configuration differs"},
        // 5
        {&serverConfig{Domain: "domain1"}, &serverConfig{Domain: "domain2"}, "Domain configuration differs"},
        // 6
        {
            &serverConfig{StorageClass: storageClassConfig{storageClass{"1", 8}, storageClass{"2", 6}}},
            &serverConfig{StorageClass: storageClassConfig{storageClass{"1", 8}, storageClass{"2", 4}}},
            "StorageClass configuration differs",
        },
        // 7
        // 5
        {
            &serverConfig{Notify: notifier{AMQP: map[string]target.AMQPArgs{"1": {Enable: true}}}},
            &serverConfig{Notify: notifier{AMQP: map[string]target.AMQPArgs{"1": {Enable: false}}}},
            "AMQP Notification configuration differs",
        },
        // 8
        // 6
        {
            &serverConfig{Notify: notifier{NATS: map[string]target.NATSArgs{"1": {Enable: true}}}},
            &serverConfig{Notify: notifier{NATS: map[string]target.NATSArgs{"1": {Enable: false}}}},
            "NATS Notification configuration differs",
        },
        // 9
        // 7
        {
            &serverConfig{Notify: notifier{NSQ: map[string]target.NSQArgs{"1": {Enable: true}}}},
            &serverConfig{Notify: notifier{NSQ: map[string]target.NSQArgs{"1": {Enable: false}}}},
            "NSQ Notification configuration differs",
        },
        // 8
        {
            &serverConfig{Notify: notifier{Elasticsearch: map[string]target.ElasticsearchArgs{"1": {Enable: true}}}},
            &serverConfig{Notify: notifier{Elasticsearch: map[string]target.ElasticsearchArgs{"1": {Enable: false}}}},
            "ElasticSearch Notification configuration differs",
        },
        // 10
        // 9
        {
            &serverConfig{Notify: notifier{Redis: map[string]target.RedisArgs{"1": {Enable: true}}}},
            &serverConfig{Notify: notifier{Redis: map[string]target.RedisArgs{"1": {Enable: false}}}},
            "Redis Notification configuration differs",
        },
        // 11
        // 10
        {
            &serverConfig{Notify: notifier{PostgreSQL: map[string]target.PostgreSQLArgs{"1": {Enable: true}}}},
            &serverConfig{Notify: notifier{PostgreSQL: map[string]target.PostgreSQLArgs{"1": {Enable: false}}}},
            "PostgreSQL Notification configuration differs",
        },
        // 12
        // 11
        {
            &serverConfig{Notify: notifier{Kafka: map[string]target.KafkaArgs{"1": {Enable: true}}}},
            &serverConfig{Notify: notifier{Kafka: map[string]target.KafkaArgs{"1": {Enable: false}}}},
            "Kafka Notification configuration differs",
        },
        // 13
        // 12
        {
            &serverConfig{Notify: notifier{Webhook: map[string]target.WebhookArgs{"1": {Enable: true}}}},
            &serverConfig{Notify: notifier{Webhook: map[string]target.WebhookArgs{"1": {Enable: false}}}},
            "Webhook Notification configuration differs",
        },
        // 14
        // 13
        {
            &serverConfig{Notify: notifier{MySQL: map[string]target.MySQLArgs{"1": {Enable: true}}}},
            &serverConfig{Notify: notifier{MySQL: map[string]target.MySQLArgs{"1": {Enable: false}}}},
            "MySQL Notification configuration differs",
        },
        // 15
        // 14
        {
            &serverConfig{Notify: notifier{MQTT: map[string]target.MQTTArgs{"1": {Enable: true}}}},
            &serverConfig{Notify: notifier{MQTT: map[string]target.MQTTArgs{"1": {Enable: false}}}},
            "MQTT Notification configuration differs",
        },
        // 15
        {
            &serverConfig{Logger: loggerConfig{
                Console: loggerConsole{Enabled: true},
                HTTP: map[string]loggerHTTP{"1": {Endpoint: "http://address1"}},
            }},
            &serverConfig{Logger: loggerConfig{
                Console: loggerConsole{Enabled: true},
                HTTP: map[string]loggerHTTP{"1": {Endpoint: "http://address2"}},
            }},
            "Logger configuration differs",
        },
    }

    for i, testCase := range testCases {
@@ -19,7 +19,6 @@ package cmd
import (
    "os"
    "path/filepath"
    "sync"

    homedir "github.com/mitchellh/go-homedir"
)
@@ -28,9 +27,6 @@ const (
    // Default minio configuration directory where below configuration files/directories are stored.
    defaultMinioConfigDir = ".minio"

    // Minio configuration file.
    minioConfigFile = "config.json"

    // Directory contains below files/directories for HTTPS configuration.
    certsDir = "certs"

@@ -44,55 +40,9 @@ const (
    privateKeyFile = "private.key"
)

// ConfigDir - configuration directory with locking.
// ConfigDir - points to a user set directory.
type ConfigDir struct {
    sync.Mutex
    dir string
}

// Set - saves given directory as configuration directory.
func (config *ConfigDir) Set(dir string) {
    config.Lock()
    defer config.Unlock()

    config.dir = dir
}

// Get - returns current configuration directory.
func (config *ConfigDir) Get() string {
    config.Lock()
    defer config.Unlock()

    return config.dir
}

func (config *ConfigDir) getCertsDir() string {
    return filepath.Join(config.Get(), certsDir)
}

// GetCADir - returns certificate CA directory.
func (config *ConfigDir) GetCADir() string {
    return filepath.Join(config.getCertsDir(), certsCADir)
}

// Create - creates configuration directory tree.
func (config *ConfigDir) Create() error {
    return os.MkdirAll(config.GetCADir(), 0700)
}

// GetMinioConfigFile - returns absolute path of config.json file.
func (config *ConfigDir) GetMinioConfigFile() string {
    return filepath.Join(config.Get(), minioConfigFile)
}

// GetPublicCertFile - returns absolute path of public.crt file.
func (config *ConfigDir) GetPublicCertFile() string {
    return filepath.Join(config.getCertsDir(), publicCertFile)
}

// GetPrivateKeyFile - returns absolute path of private.key file.
func (config *ConfigDir) GetPrivateKeyFile() string {
    return filepath.Join(config.getCertsDir(), privateKeyFile)
    path string
}

func getDefaultConfigDir() string {
@@ -104,32 +54,54 @@ func getDefaultConfigDir() string {
    return filepath.Join(homeDir, defaultMinioConfigDir)
}

var configDir = &ConfigDir{dir: getDefaultConfigDir()}

func setConfigDir(dir string) {
    configDir.Set(dir)
func getDefaultCertsDir() string {
    return filepath.Join(getDefaultConfigDir(), certsDir)
}

func getConfigDir() string {
    return configDir.Get()
func getDefaultCertsCADir() string {
    return filepath.Join(getDefaultCertsDir(), certsCADir)
}

func getCADir() string {
    return configDir.GetCADir()
var (
    // Default config, certs and CA directories.
    defaultConfigDir = &ConfigDir{path: getDefaultConfigDir()}
    defaultCertsDir = &ConfigDir{path: getDefaultCertsDir()}
    defaultCertsCADir = &ConfigDir{path: getDefaultCertsCADir()}

    // Points to current configuration directory -- deprecated, to be removed in future.
    globalConfigDir = defaultConfigDir
    // Points to current certs directory set by user with --certs-dir
    globalCertsDir = defaultCertsDir
    // Points to relative path to certs directory and is <value-of-certs-dir>/CAs
    globalCertsCADir = defaultCertsCADir
)

// Get - returns current directory.
func (dir *ConfigDir) Get() string {
    return dir.path
}

func createConfigDir() error {
    return configDir.Create()
// Attempts to create all directories, ignores any permission denied errors.
func mkdirAllIgnorePerm(path string) error {
    err := os.MkdirAll(path, 0700)
    if err != nil {
        // It is possible in kubernetes like deployments this directory
        // is already mounted and is not writable, ignore any write errors.
        if os.IsPermission(err) {
            err = nil
        }
    }
    return err
}
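A hedged usage sketch for the permission-tolerant helper above, creating the default directory tree at startup (the call site is assumed, not shown in this diff):

    // Hedged sketch: tolerate read-only mounts (e.g. pre-provisioned
    // kubernetes volumes) when creating the certs CA directory.
    if err := mkdirAllIgnorePerm(globalCertsCADir.Get()); err != nil {
        logger.FatalIf(err, "Unable to create certs CA directory")
    }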
func getConfigFile() string {
    return configDir.GetMinioConfigFile()
    return filepath.Join(globalConfigDir.Get(), minioConfigFile)
}

func getPublicCertFile() string {
    return configDir.GetPublicCertFile()
    return filepath.Join(globalCertsDir.Get(), publicCertFile)
}

func getPrivateKeyFile() string {
    return configDir.GetPrivateKeyFile()
    return filepath.Join(globalCertsDir.Get(), privateKeyFile)
}
@@ -17,15 +17,20 @@
package cmd

import (
    "context"
    "encoding/json"
    "fmt"
    "os"
    "path"
    "path/filepath"

    etcd "github.com/coreos/etcd/client"
    "github.com/minio/minio/cmd/crypto"
    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/auth"
    "github.com/minio/minio/pkg/event"
    "github.com/minio/minio/pkg/event/target"
    "github.com/minio/minio/pkg/iam/policy"
    "github.com/minio/minio/pkg/iam/validator"
    xnet "github.com/minio/minio/pkg/net"
    "github.com/minio/minio/pkg/quick"
)
@@ -33,7 +38,22 @@ import (
// DO NOT EDIT following message template, please open a github issue to discuss instead.
var configMigrateMSGTemplate = "Configuration file %s migrated from version '%s' to '%s' successfully."

// Migrates all config versions from "1" to "18".
// Save config file to corresponding backend
func Save(configFile string, data interface{}) error {
    return quick.SaveConfig(data, configFile, globalEtcdClient)
}

// Load config from backend
func Load(configFile string, data interface{}) (quick.Config, error) {
    return quick.LoadConfig(configFile, globalEtcdClient, data)
}

// GetVersion gets config version from backend
func GetVersion(configFile string) (string, error) {
    return quick.GetVersion(configFile, globalEtcdClient)
}
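These thin wrappers route through pkg/quick, so the same call persists to the local config file or to etcd depending on whether globalEtcdClient is set. A hedged round-trip sketch:

    // Hedged sketch: save the active config and read it back through the
    // same backend (file or etcd, chosen by globalEtcdClient).
    if err := Save(getConfigFile(), globalServerConfig); err != nil {
        return err
    }
    cfg := &serverConfig{}
    if _, err := Load(getConfigFile(), cfg); err != nil {
        return err
    }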
// Migrates all config versions from "1" to "28".
func migrateConfig() error {
    // Purge all configs with version '1',
    // this is a special case since version '1' used
@@ -45,6 +65,9 @@ func migrateConfig() error {
    // Load only config version information.
    version, err := GetVersion(getConfigFile())
    if err != nil {
        if os.IsNotExist(err) {
            return nil
        }
        return err
    }

@@ -188,6 +211,16 @@ func migrateConfig() error {
            return err
        }
        fallthrough
    case "26":
        if err = migrateV26ToV27(); err != nil {
            return err
        }
        fallthrough
    case "27":
        if err = migrateV27ToV28(); err != nil {
            return err
        }
        fallthrough
    case serverConfigVersion:
        // No migration needed. this always points to current version.
        err = nil
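Each case migrates one version step and falls through to the next, so a config at any historical version is walked forward to serverConfigVersion in a single call. Extending the chain for a future version would look like this hedged sketch (migrateV28ToV29 is hypothetical here):

    // Hedged sketch: a hypothetical next step in the fallthrough chain.
    // case "28":
    //     if err = migrateV28ToV29(); err != nil {
    //         return err
    //     }
    //     fallthrough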
@@ -197,11 +230,11 @@ func migrateConfig() error {

// Version '1' is not supported anymore and deprecated, safe to delete.
func purgeV1() error {
    configFile := filepath.Join(getConfigDir(), "fsUsers.json")
    configFile := filepath.Join(globalConfigDir.Get(), "fsUsers.json")

    cv1 := &configV1{}
    _, err := Load(configFile, cv1)
    if os.IsNotExist(err) || etcd.IsKeyNotFound(err) {
    if os.IsNotExist(err) {
        return nil
    } else if err != nil {
        return fmt.Errorf("Unable to load config version ‘1’. %v", err)
@@ -885,7 +918,7 @@ func migrateV12ToV13() error {
    // Copy over fields from V12 into V13 config struct
    srvConfig := &serverConfigV13{
        Logger: &loggerV7{},
        Notify: &notifier{},
        Notify: &notifierV3{},
    }
    srvConfig.Version = "13"
    srvConfig.Credential = cv12.Credential
@@ -965,7 +998,7 @@ func migrateV13ToV14() error {
    // Copy over fields from V13 into V14 config struct
    srvConfig := &serverConfigV14{
        Logger: &loggerV7{},
        Notify: &notifier{},
        Notify: &notifierV3{},
    }
    srvConfig.Version = "14"
    srvConfig.Credential = cv13.Credential
@@ -1050,7 +1083,7 @@ func migrateV14ToV15() error {
    // Copy over fields from V14 into V15 config struct
    srvConfig := &serverConfigV15{
        Logger: &loggerV7{},
        Notify: &notifier{},
        Notify: &notifierV3{},
    }
    srvConfig.Version = "15"
    srvConfig.Credential = cv14.Credential
@@ -1140,7 +1173,7 @@ func migrateV15ToV16() error {
    // Copy over fields from V15 into V16 config struct
    srvConfig := &serverConfigV16{
        Logger: &loggers{},
        Notify: &notifier{},
        Notify: &notifierV3{},
    }
    srvConfig.Version = "16"
    srvConfig.Credential = cv15.Credential
@@ -1230,7 +1263,7 @@ func migrateV16ToV17() error {
    // Copy over fields from V16 into V17 config struct
    srvConfig := &serverConfigV17{
        Logger: &loggers{},
        Notify: &notifier{},
        Notify: &notifierV3{},
    }
    srvConfig.Version = "17"
    srvConfig.Credential = cv16.Credential
@@ -1351,7 +1384,7 @@ func migrateV17ToV18() error {
    // Copy over fields from V17 into V18 config struct
    srvConfig := &serverConfigV17{
        Logger: &loggers{},
        Notify: &notifier{},
        Notify: &notifierV3{},
    }
    srvConfig.Version = "18"
    srvConfig.Credential = cv17.Credential
@@ -1453,7 +1486,7 @@ func migrateV18ToV19() error {
    // Copy over fields from V18 into V19 config struct
    srvConfig := &serverConfigV18{
        Logger: &loggers{},
        Notify: &notifier{},
        Notify: &notifierV3{},
    }
    srvConfig.Version = "19"
    srvConfig.Credential = cv18.Credential
@@ -1559,7 +1592,7 @@ func migrateV19ToV20() error {
    // Copy over fields from V19 into V20 config struct
    srvConfig := &serverConfigV20{
        Logger: &loggers{},
        Notify: &notifier{},
        Notify: &notifierV3{},
    }
    srvConfig.Version = "20"
    srvConfig.Credential = cv19.Credential
@@ -1663,7 +1696,7 @@ func migrateV20ToV21() error {

    // Copy over fields from V20 into V21 config struct
    srvConfig := &serverConfigV21{
        Notify: &notifier{},
        Notify: &notifierV3{},
    }
    srvConfig.Version = "21"
    srvConfig.Credential = cv20.Credential
@@ -1767,7 +1800,7 @@ func migrateV21ToV22() error {

    // Copy over fields from V21 into V22 config struct
    srvConfig := &serverConfigV22{
        Notify: notifier{},
        Notify: notifierV3{},
    }
    srvConfig.Version = "22"
    srvConfig.Credential = cv21.Credential
@@ -1871,7 +1904,7 @@ func migrateV22ToV23() error {

    // Copy over fields from V22 into V23 config struct
    srvConfig := &serverConfigV23{
        Notify: notifier{},
        Notify: notifierV3{},
    }
    srvConfig.Version = "23"
    srvConfig.Credential = cv22.Credential
@@ -1984,7 +2017,7 @@ func migrateV23ToV24() error {

    // Copy over fields from V23 into V24 config struct
    srvConfig := &serverConfigV24{
        Notify: notifier{},
        Notify: notifierV3{},
    }
    srvConfig.Version = "24"
    srvConfig.Credential = cv23.Credential
@@ -2097,7 +2130,7 @@ func migrateV24ToV25() error {

    // Copy over fields from V24 into V25 config struct
    srvConfig := &serverConfigV25{
        Notify: notifier{},
        Notify: notifierV3{},
    }
    srvConfig.Version = "25"
    srvConfig.Credential = cv24.Credential
@@ -2215,7 +2248,7 @@ func migrateV25ToV26() error {

    // Copy over fields from V25 into V26 config struct
    srvConfig := &serverConfigV26{
        Notify: notifier{},
        Notify: notifierV3{},
    }
    srvConfig.Version = "26"
    srvConfig.Credential = cv25.Credential
@@ -2317,3 +2350,386 @@ func migrateV25ToV26() error {
    logger.Info(configMigrateMSGTemplate, configFile, cv25.Version, srvConfig.Version)
    return nil
}

func migrateV26ToV27() error {
    configFile := getConfigFile()

    // config V27 is backward compatible with V26, load the old
    // config file in serverConfigV27 struct and put some examples
    // in the new `logger` field
    srvConfig := &serverConfigV27{}
    _, err := quick.LoadConfig(configFile, globalEtcdClient, srvConfig)
    if os.IsNotExist(err) {
        return nil
    } else if err != nil {
        return fmt.Errorf("Unable to load config file. %v", err)
    }

    if srvConfig.Version != "26" {
        return nil
    }

    srvConfig.Version = "27"
    // Enable console logging by default to avoid breaking users'
    // current deployments
    srvConfig.Logger.Console.Enabled = true
    srvConfig.Logger.HTTP = make(map[string]loggerHTTP)
    srvConfig.Logger.HTTP["1"] = loggerHTTP{}

    if err = quick.SaveConfig(srvConfig, configFile, globalEtcdClient); err != nil {
        return fmt.Errorf("Failed to migrate config from ‘26’ to ‘27’. %v", err)
    }

    logger.Info(configMigrateMSGTemplate, configFile, "26", "27")
    return nil
}

func migrateV27ToV28() error {
    configFile := getConfigFile()

    // config V28 is backward compatible with V27, load the old
    // config file in serverConfigV28 struct and initialize KMSConfig

    srvConfig := &serverConfigV28{}
    _, err := quick.LoadConfig(configFile, globalEtcdClient, srvConfig)
    if os.IsNotExist(err) {
        return nil
    } else if err != nil {
        return fmt.Errorf("Unable to load config file. %v", err)
    }

    if srvConfig.Version != "27" {
        return nil
    }

    srvConfig.Version = "28"
    srvConfig.KMS = crypto.KMSConfig{}
    if err = quick.SaveConfig(srvConfig, configFile, globalEtcdClient); err != nil {
        return fmt.Errorf("Failed to migrate config from ‘27’ to ‘28’. %v", err)
    }

    logger.Info(configMigrateMSGTemplate, configFile, "27", "28")
    return nil
}
// Migrates ${HOME}/.minio/config.json to '<export_path>/.minio.sys/config/config.json'
func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
    // Construct path to config.json for the given bucket.
    configFile := path.Join(minioConfigPrefix, minioConfigFile)

    // Verify if config was already available in .minio.sys in which case, nothing more to be done.
    if err = checkConfig(context.Background(), objAPI, configFile); err != errConfigNotFound {
        return err
    }

    defer func() {
        // Rename config.json to config.json.deprecated only upon
        // success of this function.
        if err == nil {
            os.Rename(getConfigFile(), getConfigFile()+".deprecated")
        }
    }()

    transactionConfigFile := configFile + ".transaction"

    // As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
    // and configFile, take a transaction lock to avoid data race between readConfig()
    // and saveConfig().
    objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile)
    if err = objLock.GetLock(globalOperationTimeout); err != nil {
        return err
    }
    defer objLock.Unlock()

    // Verify if backend already has the file (after holding lock)
    if err = checkConfig(context.Background(), objAPI, configFile); err != errConfigNotFound {
        return err
    } // if errConfigNotFound proceed to migrate..

    var config = &serverConfig{}
    if _, err = Load(getConfigFile(), config); err != nil {
        if !os.IsNotExist(err) {
            return err
        }
        // Read from the deprecated file as well, if necessary.
        if _, err = Load(getConfigFile()+".deprecated", config); err != nil {
            if !os.IsNotExist(err) {
                return err
            }
            // If all else fails simply initialize the server config.
            return newSrvConfig(objAPI)
        }
    }
    return saveServerConfig(context.Background(), objAPI, config)
}
// Migrates '.minio.sys/config.json' to v33.
func migrateMinioSysConfig(objAPI ObjectLayer) error {
    configFile := path.Join(minioConfigPrefix, minioConfigFile)

    // Check if the config version is latest, if not migrate.
    ok, _, err := checkConfigVersion(objAPI, configFile, serverConfigVersion)
    if err != nil {
        return err
    }
    if ok {
        return nil
    }

    // Construct path to config.json for the given bucket.
    transactionConfigFile := configFile + ".transaction"

    // As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
    // and configFile, take a transaction lock to avoid data race between readConfig()
    // and saveConfig().
    objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile)
    if err := objLock.GetLock(globalOperationTimeout); err != nil {
        return err
    }
    defer objLock.Unlock()

    if err := migrateV27ToV28MinioSys(objAPI); err != nil {
        return err
    }
    if err := migrateV28ToV29MinioSys(objAPI); err != nil {
        return err
    }
    if err := migrateV29ToV30MinioSys(objAPI); err != nil {
        return err
    }
    if err := migrateV30ToV31MinioSys(objAPI); err != nil {
        return err
    }
    if err := migrateV31ToV32MinioSys(objAPI); err != nil {
        return err
    }
    return migrateV32ToV33MinioSys(objAPI)
}
func checkConfigVersion(objAPI ObjectLayer, configFile string, version string) (bool, []byte, error) {
    data, err := readConfig(context.Background(), objAPI, configFile)
    if err != nil {
        return false, nil, err
    }

    var versionConfig struct {
        Version string `json:"version"`
    }

    vcfg := &versionConfig
    if err = json.Unmarshal(data, vcfg); err != nil {
        return false, nil, err
    }
    return vcfg.Version == version, data, nil
}
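
checkConfigVersion decodes only the "version" field before any migration step commits to a full struct decode, which is what lets each step skip cleanly when the stored config is at some other version. A minimal, self-contained sketch of the same version-peek pattern using only the standard library (names here are illustrative, not part of the MinIO codebase):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // peekVersion decodes only the "version" field of a JSON config blob,
    // ignoring every other field, just as checkConfigVersion does above.
    func peekVersion(data []byte) (string, error) {
        var v struct {
            Version string `json:"version"`
        }
        if err := json.Unmarshal(data, &v); err != nil {
            return "", err
        }
        return v.Version, nil
    }

    func main() {
        data := []byte(`{"version":"27","region":"us-east-1"}`)
        version, err := peekVersion(data)
        if err != nil {
            panic(err)
        }
        fmt.Println(version) // prints: 27
    }
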
func migrateV27ToV28MinioSys(objAPI ObjectLayer) error {
    configFile := path.Join(minioConfigPrefix, minioConfigFile)

    ok, data, err := checkConfigVersion(objAPI, configFile, "27")
    if err == errConfigNotFound {
        return nil
    } else if err != nil {
        return fmt.Errorf("Unable to load config file. %v", err)
    }
    if !ok {
        return nil
    }

    cfg := &serverConfigV28{}
    if err = json.Unmarshal(data, cfg); err != nil {
        return err
    }

    cfg.Version = "28"
    cfg.KMS = crypto.KMSConfig{}

    data, err = json.Marshal(cfg)
    if err != nil {
        return err
    }

    if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
        return fmt.Errorf("Failed to migrate config from ‘27’ to ‘28’. %v", err)
    }

    logger.Info(configMigrateMSGTemplate, configFile, "27", "28")
    return nil
}

func migrateV28ToV29MinioSys(objAPI ObjectLayer) error {
    configFile := path.Join(minioConfigPrefix, minioConfigFile)

    ok, data, err := checkConfigVersion(objAPI, configFile, "28")
    if err == errConfigNotFound {
        return nil
    } else if err != nil {
        return fmt.Errorf("Unable to load config file. %v", err)
    }
    if !ok {
        return nil
    }

    cfg := &serverConfigV29{}
    if err = json.Unmarshal(data, cfg); err != nil {
        return err
    }

    cfg.Version = "29"
    data, err = json.Marshal(cfg)
    if err != nil {
        return err
    }

    if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
        return fmt.Errorf("Failed to migrate config from ‘28’ to ‘29’. %v", err)
    }

    logger.Info(configMigrateMSGTemplate, configFile, "28", "29")
    return nil
}

func migrateV29ToV30MinioSys(objAPI ObjectLayer) error {
    configFile := path.Join(minioConfigPrefix, minioConfigFile)

    ok, data, err := checkConfigVersion(objAPI, configFile, "29")
    if err == errConfigNotFound {
        return nil
    } else if err != nil {
        return fmt.Errorf("Unable to load config file. %v", err)
    }
    if !ok {
        return nil
    }

    cfg := &serverConfigV30{}
    if err = json.Unmarshal(data, cfg); err != nil {
        return err
    }

    cfg.Version = "30"
    // Initialize the compression config. For future migrations, the compression
    // config needs to be copied over from the previous version.
    cfg.Compression.Enabled = false
    cfg.Compression.Extensions = globalCompressExtensions
    cfg.Compression.MimeTypes = globalCompressMimeTypes

    data, err = json.Marshal(cfg)
    if err != nil {
        return err
    }

    if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
        return fmt.Errorf("Failed to migrate config from ‘29’ to ‘30’. %v", err)
    }

    logger.Info(configMigrateMSGTemplate, configFile, "29", "30")
    return nil
}

func migrateV30ToV31MinioSys(objAPI ObjectLayer) error {
    configFile := path.Join(minioConfigPrefix, minioConfigFile)

    ok, data, err := checkConfigVersion(objAPI, configFile, "30")
    if err == errConfigNotFound {
        return nil
    } else if err != nil {
        return fmt.Errorf("Unable to load config file. %v", err)
    }
    if !ok {
        return nil
    }

    cfg := &serverConfigV31{}
    if err = json.Unmarshal(data, cfg); err != nil {
        return err
    }

    cfg.Version = "31"
    cfg.OpenID.JWKS = validator.JWKSArgs{
        URL: &xnet.URL{},
    }
    cfg.Policy.OPA = iampolicy.OpaArgs{
        URL:       &xnet.URL{},
        AuthToken: "",
    }

    data, err = json.Marshal(cfg)
    if err != nil {
        return err
    }

    if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
        return fmt.Errorf("Failed to migrate config from ‘30’ to ‘31’. %v", err)
    }

    logger.Info(configMigrateMSGTemplate, configFile, "30", "31")
    return nil
}

func migrateV31ToV32MinioSys(objAPI ObjectLayer) error {
    configFile := path.Join(minioConfigPrefix, minioConfigFile)

    ok, data, err := checkConfigVersion(objAPI, configFile, "31")
    if err == errConfigNotFound {
        return nil
    } else if err != nil {
        return fmt.Errorf("Unable to load config file. %v", err)
    }
    if !ok {
        return nil
    }

    cfg := &serverConfigV32{}
    if err = json.Unmarshal(data, cfg); err != nil {
        return err
    }

    cfg.Version = "32"
    cfg.Notify.NSQ = make(map[string]target.NSQArgs)
    cfg.Notify.NSQ["1"] = target.NSQArgs{}

    data, err = json.Marshal(cfg)
    if err != nil {
        return err
    }

    if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
        return fmt.Errorf("Failed to migrate config from ‘31’ to ‘32’. %v", err)
    }

    logger.Info(configMigrateMSGTemplate, configFile, "31", "32")
    return nil
}

func migrateV32ToV33MinioSys(objAPI ObjectLayer) error {
    configFile := path.Join(minioConfigPrefix, minioConfigFile)

    ok, data, err := checkConfigVersion(objAPI, configFile, "32")
    if err == errConfigNotFound {
        return nil
    } else if err != nil {
        return fmt.Errorf("Unable to load config file. %v", err)
    }
    if !ok {
        return nil
    }

    cfg := &serverConfigV33{}
    if err = json.Unmarshal(data, cfg); err != nil {
        return err
    }

    cfg.Version = "33"

    data, err = json.Marshal(cfg)
    if err != nil {
        return err
    }

    if err = saveConfig(context.Background(), objAPI, configFile, data); err != nil {
        return fmt.Errorf("Failed to migrate config from ‘32’ to ‘33’. %v", err)
    }

    logger.Info(configMigrateMSGTemplate, configFile, "32", "33")
    return nil
}
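
Every per-version step above follows the same skeleton: peek at the stored version, decode into the next version's struct, bump the version string and initialize any new fields, then save. A hypothetical sketch of what the next step might look like, built on the helpers shown in this changeset; serverConfigV34 and the "34" version string do not exist here and are assumptions for illustration only:

    // migrateV33ToV34MinioSys is a hypothetical sketch of a future migration
    // step, following the established skeleton. serverConfigV34 is not a
    // real type in this changeset.
    func migrateV33ToV34MinioSys(objAPI ObjectLayer) error {
        configFile := path.Join(minioConfigPrefix, minioConfigFile)

        ok, data, err := checkConfigVersion(objAPI, configFile, "33")
        if err == errConfigNotFound {
            return nil // Nothing to migrate.
        } else if err != nil {
            return fmt.Errorf("Unable to load config file. %v", err)
        }
        if !ok {
            return nil // Not at version 33; another step handles it.
        }

        cfg := &serverConfigV34{}
        if err = json.Unmarshal(data, cfg); err != nil {
            return err
        }

        cfg.Version = "34"
        // ... initialize any fields new in version 34 here ...

        if data, err = json.Marshal(cfg); err != nil {
            return err
        }
        return saveConfig(context.Background(), objAPI, configFile, data)
    }
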
@@ -25,14 +25,25 @@ import (

// Test if config v1 is purged
func TestServerConfigMigrateV1(t *testing.T) {
    rootPath, err := newTestConfig(globalMinioDefaultRegion)
    objLayer, fsDir, err := prepareFS()
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(fsDir)
    err = newTestConfig(globalMinioDefaultRegion, objLayer)
    if err != nil {
        t.Fatalf("Init Test config failed")
    }
    // remove the root directory after the test ends.
    rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(rootPath)
    globalConfigDir = &ConfigDir{path: rootPath}
    setConfigDir(rootPath)
    globalObjLayerMutex.Lock()
    globalObjectAPI = objLayer
    globalObjLayerMutex.Unlock()

    // Create a V1 config json file and store it
    configJSON := "{ \"version\":\"1\", \"accessKeyId\":\"abcde\", \"secretAccessKey\":\"abcdefgh\"}"
@@ -45,13 +56,14 @@ func TestServerConfigMigrateV1(t *testing.T) {
    if err := migrateConfig(); err != nil {
        t.Fatal("Unexpected error: ", err)
    }

    // Check if config v1 is removed from filesystem
    if _, err := os.Stat(configPath); err == nil || !os.IsNotExist(err) {
        t.Fatal("Config V1 file is not purged")
    }

    // Initialize server config and check again if everything is fine
    if err := loadConfig(); err != nil {
    if err := loadConfig(objLayer); err != nil {
        t.Fatalf("Unable to initialize from updated config file %s", err)
    }
}
@@ -59,20 +71,13 @@ func TestServerConfigMigrateV1(t *testing.T) {
// Test if all migrate code returns nil when config file does not
// exist
func TestServerConfigMigrateInexistentConfig(t *testing.T) {
    rootPath, err := newTestConfig(globalMinioDefaultRegion)
    rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
    if err != nil {
        t.Fatalf("Init Test config failed")
        t.Fatal(err)
    }
    // remove the root directory after the test ends.
    defer os.RemoveAll(rootPath)

    setConfigDir(rootPath)
    configPath := rootPath + "/" + minioConfigFile

    // Remove config file
    if err := os.Remove(configPath); err != nil {
        t.Fatal("Unexpected error: ", err)
    }
    globalConfigDir = &ConfigDir{path: rootPath}

    if err := migrateV2ToV3(); err != nil {
        t.Fatal("migrate v2 to v3 should succeed when no config file is found")
@@ -134,18 +139,42 @@ func TestServerConfigMigrateInexistentConfig(t *testing.T) {
    if err := migrateV21ToV22(); err != nil {
        t.Fatal("migrate v21 to v22 should succeed when no config file is found")
    }
    if err := migrateV22ToV23(); err != nil {
        t.Fatal("migrate v22 to v23 should succeed when no config file is found")
    }
    if err := migrateV23ToV24(); err != nil {
        t.Fatal("migrate v23 to v24 should succeed when no config file is found")
    }
    if err := migrateV24ToV25(); err != nil {
        t.Fatal("migrate v24 to v25 should succeed when no config file is found")
    }
    if err := migrateV25ToV26(); err != nil {
        t.Fatal("migrate v25 to v26 should succeed when no config file is found")
    }
    if err := migrateV26ToV27(); err != nil {
        t.Fatal("migrate v26 to v27 should succeed when no config file is found")
    }
    if err := migrateV27ToV28(); err != nil {
        t.Fatal("migrate v27 to v28 should succeed when no config file is found")
    }
}

// Test if a config migration from v2 to v23 is successfully done
func TestServerConfigMigrateV2toV23(t *testing.T) {
    rootPath, err := newTestConfig(globalMinioDefaultRegion)
// Test if a config migration from v2 to v33 is successfully done
func TestServerConfigMigrateV2toV33(t *testing.T) {
    rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
    if err != nil {
        t.Fatalf("Init Test config failed")
        t.Fatal(err)
    }
    // remove the root directory after the test ends.
    defer os.RemoveAll(rootPath)

    setConfigDir(rootPath)
    globalConfigDir = &ConfigDir{path: rootPath}

    objLayer, fsDir, err := prepareFS()
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(fsDir)

    configPath := rootPath + "/" + minioConfigFile

    // Create a corrupted config file
@@ -171,8 +200,16 @@ func TestServerConfigMigrateV2toV23(t *testing.T) {
        t.Fatal("Unexpected error: ", err)
    }

    if err := migrateConfigToMinioSys(objLayer); err != nil {
        t.Fatal("Unexpected error: ", err)
    }

    if err := migrateMinioSysConfig(objLayer); err != nil {
        t.Fatal("Unexpected error: ", err)
    }

    // Initialize server config and check again if everything is fine
    if err := loadConfig(); err != nil {
    if err := loadConfig(objLayer); err != nil {
        t.Fatalf("Unable to initialize from updated config file %s", err)
    }

@@ -186,6 +223,7 @@ func TestServerConfigMigrateV2toV23(t *testing.T) {
    if globalServerConfig.Credential.AccessKey != accessKey {
        t.Fatalf("Access key lost during migration, expected: %v, found: %v", accessKey, globalServerConfig.Credential.AccessKey)
    }

    if globalServerConfig.Credential.SecretKey != secretKey {
        t.Fatalf("Secret key lost during migration, expected: %v, found: %v", secretKey, globalServerConfig.Credential.SecretKey)
    }
@@ -193,14 +231,13 @@ func TestServerConfigMigrateV2toV23(t *testing.T) {

// Test if all migrate code returns error with corrupted config files
func TestServerConfigMigrateFaultyConfig(t *testing.T) {
    rootPath, err := newTestConfig(globalMinioDefaultRegion)
    rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
    if err != nil {
        t.Fatalf("Init Test config failed")
        t.Fatal(err)
    }
    // remove the root directory after the test ends.
    defer os.RemoveAll(rootPath)

    setConfigDir(rootPath)
    globalConfigDir = &ConfigDir{path: rootPath}
    configPath := rootPath + "/" + minioConfigFile

    // Create a corrupted config file
@@ -272,18 +309,32 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
    if err := migrateV22ToV23(); err == nil {
        t.Fatal("migrateConfigV22ToV23() should fail with a corrupted json")
    }
    if err := migrateV23ToV24(); err == nil {
        t.Fatal("migrateConfigV23ToV24() should fail with a corrupted json")
    }
    if err := migrateV24ToV25(); err == nil {
        t.Fatal("migrateConfigV24ToV25() should fail with a corrupted json")
    }
    if err := migrateV25ToV26(); err == nil {
        t.Fatal("migrateConfigV25ToV26() should fail with a corrupted json")
    }
    if err := migrateV26ToV27(); err == nil {
        t.Fatal("migrateConfigV26ToV27() should fail with a corrupted json")
    }
    if err := migrateV27ToV28(); err == nil {
        t.Fatal("migrateConfigV27ToV28() should fail with a corrupted json")
    }
}

// Test if all migrate code returns error with corrupted config files
func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
    rootPath, err := newTestConfig(globalMinioDefaultRegion)
    rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
    if err != nil {
        t.Fatalf("Init Test config failed")
        t.Fatal(err)
    }
    // remove the root directory after the test ends.
    defer os.RemoveAll(rootPath)

    setConfigDir(rootPath)
    globalConfigDir = &ConfigDir{path: rootPath}
    configPath := rootPath + "/" + minioConfigFile

    for i := 3; i <= 17; i++ {

@@ -19,8 +19,11 @@ package cmd
import (
    "sync"

    "github.com/minio/minio/cmd/crypto"
    "github.com/minio/minio/pkg/auth"
    "github.com/minio/minio/pkg/event/target"
    "github.com/minio/minio/pkg/iam/policy"
    "github.com/minio/minio/pkg/iam/validator"
    "github.com/minio/minio/pkg/quick"
)

@@ -370,7 +373,7 @@ type serverConfigV12 struct {
    Notify notifierV2 `json:"notify"`
}

type notifier struct {
type notifierV3 struct {
    AMQP          map[string]target.AMQPArgs          `json:"amqp"`
    Elasticsearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
    Kafka         map[string]target.KafkaArgs         `json:"kafka"`
@@ -395,7 +398,7 @@ type serverConfigV13 struct {
    Logger *loggerV7 `json:"logger"`

    // Notification queue configuration.
    Notify *notifier `json:"notify"`
    Notify *notifierV3 `json:"notify"`
}

// serverConfigV14 server configuration version '14' which is like
@@ -412,7 +415,7 @@ type serverConfigV14 struct {
    Logger *loggerV7 `json:"logger"`

    // Notification queue configuration.
    Notify *notifier `json:"notify"`
    Notify *notifierV3 `json:"notify"`
}

// serverConfigV15 server configuration version '15' which is like
@@ -429,7 +432,7 @@ type serverConfigV15 struct {
    Logger *loggerV7 `json:"logger"`

    // Notification queue configuration.
    Notify *notifier `json:"notify"`
    Notify *notifierV3 `json:"notify"`
}

// FileLogger is introduced to work around the dependency on logrus
@@ -467,7 +470,7 @@ type serverConfigV16 struct {
    Logger *loggers `json:"logger"`

    // Notification queue configuration.
    Notify *notifier `json:"notify"`
    Notify *notifierV3 `json:"notify"`
}

// serverConfigV17 server configuration version '17' which is like
@@ -486,7 +489,7 @@ type serverConfigV17 struct {
    Logger *loggers `json:"logger"`

    // Notification queue configuration.
    Notify *notifier `json:"notify"`
    Notify *notifierV3 `json:"notify"`
}

// serverConfigV18 server configuration version '18' which is like
@@ -505,7 +508,7 @@ type serverConfigV18 struct {
    Logger *loggers `json:"logger"`

    // Notification queue configuration.
    Notify *notifier `json:"notify"`
    Notify *notifierV3 `json:"notify"`
}

// serverConfigV19 server configuration version '19' which is like
@@ -523,7 +526,7 @@ type serverConfigV19 struct {
    Logger *loggers `json:"logger"`

    // Notification queue configuration.
    Notify *notifier `json:"notify"`
    Notify *notifierV3 `json:"notify"`
}

// serverConfigV20 server configuration version '20' which is like
@@ -542,7 +545,7 @@ type serverConfigV20 struct {
    Logger *loggers `json:"logger"`

    // Notification queue configuration.
    Notify *notifier `json:"notify"`
    Notify *notifierV3 `json:"notify"`
}

// serverConfigV21 is just like version '20' without logger field
@@ -557,7 +560,7 @@ type serverConfigV21 struct {
    Domain string `json:"domain"`

    // Notification queue configuration.
    Notify *notifier `json:"notify"`
    Notify *notifierV3 `json:"notify"`
}

// serverConfigV22 is just like version '21' with added support
@@ -578,7 +581,7 @@ type serverConfigV22 struct {
    StorageClass storageClassConfig `json:"storageclass"`

    // Notification queue configuration.
    Notify notifier `json:"notify"`
    Notify notifierV3 `json:"notify"`
}

// serverConfigV23 is just like version '22' with addition of cache field.
@@ -601,7 +604,7 @@ type serverConfigV23 struct {
    Cache CacheConfig `json:"cache"`

    // Notification queue configuration.
    Notify notifier `json:"notify"`
    Notify notifierV3 `json:"notify"`
}

// serverConfigV24 is just like version '23', we had to revert
@@ -625,7 +628,7 @@ type serverConfigV24 struct {
    Cache CacheConfig `json:"cache"`

    // Notification queue configuration.
    Notify notifier `json:"notify"`
    Notify notifierV3 `json:"notify"`
}

// serverConfigV25 is just like version '24', stores additionally
@@ -652,14 +655,11 @@ type serverConfigV25 struct {
    Cache CacheConfig `json:"cache"`

    // Notification queue configuration.
    Notify notifier `json:"notify"`
    Notify notifierV3 `json:"notify"`
}

// serverConfigV26 is just like version '25', stores additionally
// cache max use value in 'CacheConfig'.
//
// IMPORTANT NOTE: When updating this struct make sure that
// serverConfig.ConfigDiff() is updated as necessary.
type serverConfigV26 struct {
    quick.Config `json:"-"` // ignore interfaces

@@ -679,5 +679,260 @@ type serverConfigV26 struct {
    Cache CacheConfig `json:"cache"`

    // Notification queue configuration.
    Notify notifier `json:"notify"`
    Notify notifierV3 `json:"notify"`
}

type loggerConsole struct {
    Enabled bool `json:"enabled"`
}

type loggerHTTP struct {
    Enabled  bool   `json:"enabled"`
    Endpoint string `json:"endpoint"`
}

type loggerConfig struct {
    Console loggerConsole         `json:"console"`
    HTTP    map[string]loggerHTTP `json:"http"`
}

// serverConfigV27 is just like version '26', stores additionally
// the logger field.
//
// IMPORTANT NOTE: When updating this struct make sure that
// serverConfig.ConfigDiff() is updated as necessary.
type serverConfigV27 struct {
    quick.Config `json:"-"` // ignore interfaces

    Version string `json:"version"`

    // S3 API configuration.
    Credential auth.Credentials `json:"credential"`
    Region     string           `json:"region"`
    Browser    BoolFlag         `json:"browser"`
    Worm       BoolFlag         `json:"worm"`
    Domain     string           `json:"domain"`

    // Storage class configuration
    StorageClass storageClassConfig `json:"storageclass"`

    // Cache configuration
    Cache CacheConfig `json:"cache"`

    // Notification queue configuration.
    Notify notifierV3 `json:"notify"`

    // Logger configuration
    Logger loggerConfig `json:"logger"`
}

// serverConfigV28 is just like version '27', additionally
// storing KMS config
//
// IMPORTANT NOTE: When updating this struct make sure that
// serverConfig.ConfigDiff() is updated as necessary.
type serverConfigV28 struct {
    quick.Config `json:"-"` // ignore interfaces

    Version string `json:"version"`

    // S3 API configuration.
    Credential auth.Credentials `json:"credential"`
    Region     string           `json:"region"`
    Worm       BoolFlag         `json:"worm"`

    // Storage class configuration
    StorageClass storageClassConfig `json:"storageclass"`

    // Cache configuration
    Cache CacheConfig `json:"cache"`

    // KMS configuration
    KMS crypto.KMSConfig `json:"kms"`

    // Notification queue configuration.
    Notify notifierV3 `json:"notify"`

    // Logger configuration
    Logger loggerConfig `json:"logger"`
}

// serverConfigV29 is just like version '28'.
type serverConfigV29 serverConfigV28

// compressionConfig represents the compression settings.
type compressionConfig struct {
    Enabled    bool     `json:"enabled"`
    Extensions []string `json:"extensions"`
    MimeTypes  []string `json:"mime-types"`
}

// serverConfigV30 is just like version '29', stores additionally
// extensions and mimetypes fields for compression.
type serverConfigV30 struct {
    Version string `json:"version"`

    // S3 API configuration.
    Credential auth.Credentials `json:"credential"`
    Region     string           `json:"region"`
    Worm       BoolFlag         `json:"worm"`

    // Storage class configuration
    StorageClass storageClassConfig `json:"storageclass"`

    // Cache configuration
    Cache CacheConfig `json:"cache"`

    // KMS configuration
    KMS crypto.KMSConfig `json:"kms"`

    // Notification queue configuration.
    Notify notifierV3 `json:"notify"`

    // Logger configuration
    Logger loggerConfig `json:"logger"`

    // Compression configuration
    Compression compressionConfig `json:"compress"`
}

// serverConfigV31 is just like version '30', with OPA and OpenID configuration.
type serverConfigV31 struct {
    Version string `json:"version"`

    // S3 API configuration.
    Credential auth.Credentials `json:"credential"`
    Region     string           `json:"region"`
    Worm       BoolFlag         `json:"worm"`

    // Storage class configuration
    StorageClass storageClassConfig `json:"storageclass"`

    // Cache configuration
    Cache CacheConfig `json:"cache"`

    // KMS configuration
    KMS crypto.KMSConfig `json:"kms"`

    // Notification queue configuration.
    Notify notifierV3 `json:"notify"`

    // Logger configuration
    Logger loggerConfig `json:"logger"`

    // Compression configuration
    Compression compressionConfig `json:"compress"`

    // OpenID configuration
    OpenID struct {
        // JWKS validator config.
        JWKS validator.JWKSArgs `json:"jwks"`
    } `json:"openid"`

    // External policy enforcements.
    Policy struct {
        // OPA configuration.
        OPA iampolicy.OpaArgs `json:"opa"`

        // Add new external policy enforcements here.
    } `json:"policy"`
}

type notifier struct {
    AMQP          map[string]target.AMQPArgs          `json:"amqp"`
    Elasticsearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
    Kafka         map[string]target.KafkaArgs         `json:"kafka"`
    MQTT          map[string]target.MQTTArgs          `json:"mqtt"`
    MySQL         map[string]target.MySQLArgs         `json:"mysql"`
    NATS          map[string]target.NATSArgs          `json:"nats"`
    NSQ           map[string]target.NSQArgs           `json:"nsq"`
    PostgreSQL    map[string]target.PostgreSQLArgs    `json:"postgresql"`
    Redis         map[string]target.RedisArgs         `json:"redis"`
    Webhook       map[string]target.WebhookArgs       `json:"webhook"`
}

// serverConfigV32 is just like version '31' with the NSQ notifier added.
type serverConfigV32 struct {
    Version string `json:"version"`

    // S3 API configuration.
    Credential auth.Credentials `json:"credential"`
    Region     string           `json:"region"`
    Worm       BoolFlag         `json:"worm"`

    // Storage class configuration
    StorageClass storageClassConfig `json:"storageclass"`

    // Cache configuration
    Cache CacheConfig `json:"cache"`

    // KMS configuration
    KMS crypto.KMSConfig `json:"kms"`

    // Notification queue configuration.
    Notify notifier `json:"notify"`

    // Logger configuration
    Logger loggerConfig `json:"logger"`

    // Compression configuration
    Compression compressionConfig `json:"compress"`

    // OpenID configuration
    OpenID struct {
        // JWKS validator config.
        JWKS validator.JWKSArgs `json:"jwks"`
    } `json:"openid"`

    // External policy enforcements.
    Policy struct {
        // OPA configuration.
        OPA iampolicy.OpaArgs `json:"opa"`

        // Add new external policy enforcements here.
    } `json:"policy"`
}

// serverConfigV33 is just like version '32', removes clientID from NATS and MQTT, and adds queueDir with MQTT.
type serverConfigV33 struct {
    quick.Config `json:"-"` // ignore interfaces

    Version string `json:"version"`

    // S3 API configuration.
    Credential auth.Credentials `json:"credential"`
    Region     string           `json:"region"`
    Worm       BoolFlag         `json:"worm"`

    // Storage class configuration
    StorageClass storageClassConfig `json:"storageclass"`

    // Cache configuration
    Cache CacheConfig `json:"cache"`

    // KMS configuration
    KMS crypto.KMSConfig `json:"kms"`

    // Notification queue configuration.
    Notify notifier `json:"notify"`

    // Logger configuration
    Logger loggerConfig `json:"logger"`

    // Compression configuration
    Compression compressionConfig `json:"compress"`

    // OpenID configuration
    OpenID struct {
        // JWKS validator config.
        JWKS validator.JWKSArgs `json:"jwks"`
    } `json:"openid"`

    // External policy enforcements.
    Policy struct {
        // OPA configuration.
        OPA iampolicy.OpaArgs `json:"opa"`

        // Add new external policy enforcements here.
    } `json:"policy"`
}

cmd/config.go (new file, 201 lines)
@@ -0,0 +1,201 @@
/*
 * Minio Cloud Storage, (C) 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
    "bytes"
    "context"
    "encoding/json"
    "path"
    "runtime"
    "strings"

    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/quick"
)

const (
    minioConfigPrefix = "config"

    // Minio configuration file.
    minioConfigFile = "config.json"

    // Minio backup file
    minioConfigBackupFile = minioConfigFile + ".backup"
)

func saveServerConfig(ctx context.Context, objAPI ObjectLayer, config *serverConfig) error {
    if err := quick.CheckData(config); err != nil {
        return err
    }

    data, err := json.MarshalIndent(config, "", "\t")
    if err != nil {
        return err
    }

    configFile := path.Join(minioConfigPrefix, minioConfigFile)
    if globalEtcdClient != nil {
        return saveConfigEtcd(ctx, globalEtcdClient, configFile, data)
    }

    // Create a backup of the current config
    oldData, err := readConfig(ctx, objAPI, configFile)
    if err == nil {
        backupConfigFile := path.Join(minioConfigPrefix, minioConfigBackupFile)
        if err = saveConfig(ctx, objAPI, backupConfigFile, oldData); err != nil {
            return err
        }
    } else {
        if err != errConfigNotFound {
            return err
        }
    }

    // Save the new config in the std config path
    return saveConfig(ctx, objAPI, configFile, data)
}
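
saveServerConfig backs up the current object before overwriting it, so a failed write of the new config never destroys the only good copy. A stdlib-only sketch of the same backup-then-overwrite pattern against the local filesystem (the file names are illustrative, not MinIO paths):

    package main

    import (
        "io/ioutil"
        "os"
    )

    // saveWithBackup copies the current contents of path to path+".backup"
    // before writing the new data, mirroring the ordering used by
    // saveServerConfig above.
    func saveWithBackup(path string, data []byte) error {
        old, err := ioutil.ReadFile(path)
        if err == nil {
            if err = ioutil.WriteFile(path+".backup", old, 0644); err != nil {
                return err
            }
        } else if !os.IsNotExist(err) {
            return err // A missing file is fine; any other read error is not.
        }
        return ioutil.WriteFile(path, data, 0644)
    }

    func main() {
        if err := saveWithBackup("config.json", []byte(`{"version":"33"}`)); err != nil {
            panic(err)
        }
    }
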

func readServerConfig(ctx context.Context, objAPI ObjectLayer) (*serverConfig, error) {
    var configData []byte
    var err error

    configFile := path.Join(minioConfigPrefix, minioConfigFile)
    if globalEtcdClient != nil {
        configData, err = readConfigEtcd(ctx, globalEtcdClient, configFile)
    } else {
        configData, err = readConfig(ctx, objAPI, configFile)
    }
    if err != nil {
        return nil, err
    }

    if runtime.GOOS == "windows" {
        configData = bytes.Replace(configData, []byte("\r\n"), []byte("\n"), -1)
    }

    if err = quick.CheckDuplicateKeys(string(configData)); err != nil {
        return nil, err
    }

    var config = &serverConfig{}
    if err = json.Unmarshal(configData, config); err != nil {
        return nil, err
    }

    if err = quick.CheckData(config); err != nil {
        return nil, err
    }

    return config, nil
}
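
The duplicate-key check matters because encoding/json silently keeps the last value when a key repeats, which could mask a corrupted or hand-edited config. A minimal sketch of how such a check can be done with a token walk over the document; this mirrors the intent of quick.CheckDuplicateKeys, though the real helper's implementation may differ:

    package main

    import (
        "encoding/json"
        "fmt"
        "strings"
    )

    // checkDupKeys consumes one JSON value from dec and returns an error if
    // any object within it repeats a key at the same nesting level.
    func checkDupKeys(dec *json.Decoder) error {
        tok, err := dec.Token()
        if err != nil {
            return err
        }
        delim, ok := tok.(json.Delim)
        if !ok {
            return nil // A scalar: nothing to check.
        }
        switch delim {
        case '{':
            seen := map[string]bool{}
            for dec.More() {
                keyTok, err := dec.Token()
                if err != nil {
                    return err
                }
                key := keyTok.(string) // Object keys are always strings.
                if seen[key] {
                    return fmt.Errorf("duplicate key %q", key)
                }
                seen[key] = true
                if err := checkDupKeys(dec); err != nil { // The value.
                    return err
                }
            }
            _, err = dec.Token() // Consume the closing '}'.
            return err
        case '[':
            for dec.More() {
                if err := checkDupKeys(dec); err != nil {
                    return err
                }
            }
            _, err = dec.Token() // Consume the closing ']'.
            return err
        }
        return nil
    }

    func main() {
        bad := `{"version":"33","version":"32"}`
        err := checkDupKeys(json.NewDecoder(strings.NewReader(bad)))
        fmt.Println(err) // duplicate key "version"
    }
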

// ConfigSys - config system.
type ConfigSys struct{}

// Load - load config.json.
func (sys *ConfigSys) Load(objAPI ObjectLayer) error {
    return sys.Init(objAPI)
}

// Init - initializes config system from config.json.
func (sys *ConfigSys) Init(objAPI ObjectLayer) error {
    if objAPI == nil {
        return errInvalidArgument
    }

    doneCh := make(chan struct{})
    defer close(doneCh)

    // Initializing configuration needs a retry mechanism for
    // the following reasons:
    //  - Read quorum is lost just after the initialization
    //    of the object layer.
    //  - Write quorum is not met when upgrading the configuration
    //    version is needed.
    retryTimerCh := newRetryTimerSimple(doneCh)
    for {
        select {
        case <-retryTimerCh:
            err := initConfig(objAPI)
            if err != nil {
                if strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
                    strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
                    logger.Info("Waiting for configuration to be initialized..")
                    continue
                }
                return err
            }

            return nil
        }
    }
}

// NewConfigSys - creates new config system object.
func NewConfigSys() *ConfigSys {
    return &ConfigSys{}
}

// Initialize and load config from remote etcd or local config directory
func initConfig(objAPI ObjectLayer) error {
    if objAPI == nil {
        return errServerNotInitialized
    }

    configFile := path.Join(minioConfigPrefix, minioConfigFile)

    if globalEtcdClient != nil {
        if err := checkConfigEtcd(context.Background(), globalEtcdClient, getConfigFile()); err != nil {
            if err == errConfigNotFound {
                // Migrates all configs at the old location.
                if err = migrateConfig(); err != nil {
                    return err
                }
                // Migrates etcd ${HOME}/.minio/config.json to '/config/config.json'
                if err = migrateConfigToMinioSys(objAPI); err != nil {
                    return err
                }
            } else {
                return err
            }
        }

        // Watch config for changes and reload it.
        go watchConfigEtcd(objAPI, configFile, loadConfig)

    } else {
        if isFile(getConfigFile()) {
            if err := migrateConfig(); err != nil {
                return err
            }
        }
        // Migrates ${HOME}/.minio/config.json or config.json.deprecated
        // to '<export_path>/.minio.sys/config/config.json';
        // ignored if the file doesn't exist.
        if err := migrateConfigToMinioSys(objAPI); err != nil {
            return err
        }

        // Migrates backend '<export_path>/.minio.sys/config/config.json' to the latest version.
        if err := migrateMinioSysConfig(objAPI); err != nil {
            return err
        }
    }

    return loadConfig(objAPI)
}
@@ -17,90 +17,52 @@
package cmd

import (
    "fmt"
    "net/http"
    "net/url"
    "strconv"
    "strings"
)

// Writes an S3-compatible copy part range error.
func writeCopyPartErr(w http.ResponseWriter, err error, url *url.URL) {
func writeCopyPartErr(w http.ResponseWriter, err error, url *url.URL, browser bool) {
    switch err {
    case errInvalidRange:
        writeErrorResponse(w, ErrInvalidCopyPartRange, url)
        writeErrorResponse(w, ErrInvalidCopyPartRange, url, browser)
        return
    case errInvalidRangeSource:
        writeErrorResponse(w, ErrInvalidCopyPartRangeSource, url)
        writeErrorResponse(w, ErrInvalidCopyPartRangeSource, url, browser)
        return
    default:
        writeErrorResponse(w, ErrInternalError, url)
        writeErrorResponse(w, ErrInternalError, url, browser)
        return
    }
}

// Parses x-amz-copy-source-range for CopyObjectPart API. Specifically written to
// differentiate the behavior between the regular httpRange header v/s x-amz-copy-source-range.
// The range of bytes to copy from the source object. The range value must use the form
// bytes=first-last, where first and last are the zero-based byte offsets to copy.
// For example, bytes=0-9 indicates that you want to copy the first ten bytes of the source.
// Parses x-amz-copy-source-range for CopyObjectPart API. Its behavior
// is different from the regular HTTP range header. It only supports the
// form `bytes=first-last` where first and last are zero-based byte
// offsets. See
// http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
func parseCopyPartRange(rangeString string, resourceSize int64) (hrange *httpRange, err error) {
    // Return an error if the given range string doesn't start with the byte range prefix.
    if !strings.HasPrefix(rangeString, byteRangePrefix) {
        return nil, fmt.Errorf("'%s' does not start with '%s'", rangeString, byteRangePrefix)
// for full details. This function treats an empty rangeString as
// referring to the whole resource.
func parseCopyPartRangeSpec(rangeString string) (hrange *HTTPRangeSpec, err error) {
    hrange, err = parseRequestRangeSpec(rangeString)
    if err != nil {
        return nil, err
    }

    // Trim the byte range prefix.
    byteRangeString := strings.TrimPrefix(rangeString, byteRangePrefix)

    // Check if the range string contains the delimiter '-', else return an error, e.g. "bytes=8".
    sepIndex := strings.Index(byteRangeString, "-")
    if sepIndex == -1 {
    if hrange.IsSuffixLength || hrange.Start < 0 || hrange.End < 0 {
        return nil, errInvalidRange
    }

    offsetBeginString := byteRangeString[:sepIndex]
    offsetBegin := int64(-1)
    // Convert offsetBeginString only if it's not empty.
    if len(offsetBeginString) > 0 {
        if !validBytePos.MatchString(offsetBeginString) {
            return nil, errInvalidRange
        }
        if offsetBegin, err = strconv.ParseInt(offsetBeginString, 10, 64); err != nil {
            return nil, errInvalidRange
        }
    }

    offsetEndString := byteRangeString[sepIndex+1:]
    offsetEnd := int64(-1)
    // Convert offsetEndString only if it's not empty.
    if len(offsetEndString) > 0 {
        if !validBytePos.MatchString(offsetEndString) {
            return nil, errInvalidRange
        }
        if offsetEnd, err = strconv.ParseInt(offsetEndString, 10, 64); err != nil {
            return nil, errInvalidRange
        }
    }

    // rangeString contains only a first byte position, e.g. "bytes=2-", or
    // rangeString contains only a last byte position, e.g. "bytes=-2".
    if offsetBegin == -1 || offsetEnd == -1 {
        return nil, errInvalidRange
    }

    // The first byte position should not be greater than the last byte
    // position, e.g. "bytes=5-2" is rejected.
    if offsetBegin > offsetEnd {
        return nil, errInvalidRange
    }

    // First and last byte positions should not be >= resourceSize.
    if offsetBegin >= resourceSize || offsetEnd >= resourceSize {
        return nil, errInvalidRangeSource
    }

    // Success.
    return &httpRange{offsetBegin, offsetEnd, resourceSize}, nil
    return hrange, nil
}

// checkCopyPartRangeWithSize adds more checks to the range string in case of
// copy object part. This API requires having specific start and end range values,
// e.g. 'bytes=3-10'. Other use cases will be rejected.
func checkCopyPartRangeWithSize(rs *HTTPRangeSpec, resourceSize int64) (err error) {
    if rs == nil {
        return nil
    }
    if rs.IsSuffixLength || rs.Start >= resourceSize || rs.End >= resourceSize {
        return errInvalidRangeSource
    }
    return nil
}
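
Taken together, parseCopyPartRangeSpec and checkCopyPartRangeWithSize narrow the general HTTP range grammar: a copy-part range must have explicit start and end offsets, both inside the source object. Based on the rules above and the test cases below, the behavior for a 10-byte source object would be:

    "bytes=2-5"   -> accepted, copies offsets 2..5 (4 bytes)
    "bytes=-5"    -> rejected (suffix-length form not allowed for copy parts)
    "bytes=1-"    -> rejected (open-ended form not allowed for copy parts)
    "bytes=20-30" -> errInvalidRangeSource (range lies beyond the object size)
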

@@ -19,35 +19,37 @@ package cmd
import "testing"

// Test parseCopyPartRange()
func TestParseCopyPartRange(t *testing.T) {
func TestParseCopyPartRangeSpec(t *testing.T) {
    // Test success cases.
    successCases := []struct {
        rangeString string
        offsetBegin int64
        offsetEnd   int64
        length      int64
    }{
        {"bytes=2-5", 2, 5, 4},
        {"bytes=2-9", 2, 9, 8},
        {"bytes=2-2", 2, 2, 1},
        {"bytes=0000-0006", 0, 6, 7},
        {"bytes=2-5", 2, 5},
        {"bytes=2-9", 2, 9},
        {"bytes=2-2", 2, 2},
        {"bytes=0000-0006", 0, 6},
    }
    objectSize := int64(10)

    for _, successCase := range successCases {
        hrange, err := parseCopyPartRange(successCase.rangeString, 10)
        rs, err := parseCopyPartRangeSpec(successCase.rangeString)
        if err != nil {
            t.Fatalf("expected: <nil>, got: %s", err)
        }

        if hrange.offsetBegin != successCase.offsetBegin {
            t.Fatalf("expected: %d, got: %d", successCase.offsetBegin, hrange.offsetBegin)
        start, length, err1 := rs.GetOffsetLength(objectSize)
        if err1 != nil {
            t.Fatalf("expected: <nil>, got: %s", err1)
        }

        if hrange.offsetEnd != successCase.offsetEnd {
            t.Fatalf("expected: %d, got: %d", successCase.offsetEnd, hrange.offsetEnd)
        if start != successCase.offsetBegin {
            t.Fatalf("expected: %d, got: %d", successCase.offsetBegin, start)
        }
        if hrange.getLength() != successCase.length {
            t.Fatalf("expected: %d, got: %d", successCase.length, hrange.getLength())

        if start+length-1 != successCase.offsetEnd {
            t.Fatalf("expected: %d, got: %d", successCase.offsetEnd, start+length-1)
        }
    }

@@ -59,15 +61,16 @@ func TestParseCopyPartRange(t *testing.T) {
        "bytes=2-+5",
        "bytes=2--5",
        "bytes=-",
        "",
        "2-5",
        "bytes = 2-5",
        "bytes=2 - 5",
        "bytes=0-0,-1",
        "bytes=2-5 ",
        "bytes=-1",
        "bytes=1-",
    }
    for _, rangeString := range invalidRangeStrings {
        if _, err := parseCopyPartRange(rangeString, 10); err == nil {
        if _, err := parseCopyPartRangeSpec(rangeString); err == nil {
            t.Fatalf("expected: an error, got: <nil> for range %s", rangeString)
        }
    }
@@ -78,8 +81,14 @@ func TestParseCopyPartRange(t *testing.T) {
        "bytes=20-30",
    }
    for _, rangeString := range errorRangeString {
        if _, err := parseCopyPartRange(rangeString, 10); err != errInvalidRangeSource {
            t.Fatalf("expected: %s, got: %s", errInvalidRangeSource, err)
        rs, err := parseCopyPartRangeSpec(rangeString)
        if err == nil {
            err1 := checkCopyPartRangeWithSize(rs, objectSize)
            if err1 != errInvalidRangeSource {
                t.Fatalf("expected: %s, got: %s", errInvalidRangeSource, err)
            }
        } else {
            t.Fatalf("expected: %s, got: <nil>", errInvalidRangeSource)
        }
    }
}

cmd/crypto/config.go (new file, 21 lines)
@@ -0,0 +1,21 @@
// Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package crypto

// KMSConfig contains the KMS configuration for Hashicorp Vault.
type KMSConfig struct {
    AutoEncryption bool        `json:"-"`
    Vault          VaultConfig `json:"vault"`
}

@@ -32,7 +32,7 @@
// Input: ClientKey, bucket, object, metadata, object_data
//  - IV := Random({0,1}²⁵⁶)
//  - ObjectKey := SHA256(ClientKey || Random({0,1}²⁵⁶))
//  - KeyEncKey := HMAC-SHA256(ClientKey, IV || bucket || object)
//  - KeyEncKey := HMAC-SHA256(ClientKey, IV || 'SSE-C' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
//  - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
//  - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
//  - metadata <- IV
@@ -43,7 +43,7 @@
// Input: ClientKey, bucket, object, metadata, enc_object_data
//  - IV <- metadata
//  - SealedKey <- metadata
//  - KeyEncKey := HMAC-SHA256(ClientKey, IV || bucket || object)
//  - KeyEncKey := HMAC-SHA256(ClientKey, IV || 'SSE-C' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
//  - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
//  - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
@@ -64,7 +64,7 @@
// Input: MasterKey, bucket, object, metadata, object_data
//  - IV := Random({0,1}²⁵⁶)
//  - ObjectKey := SHA256(MasterKey || Random({0,1}²⁵⁶))
//  - KeyEncKey := HMAC-SHA256(MasterKey, IV || bucket || object)
//  - KeyEncKey := HMAC-SHA256(MasterKey, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
//  - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
//  - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
//  - metadata <- IV
@@ -75,7 +75,7 @@
// Input: MasterKey, bucket, object, metadata, enc_object_data
//  - IV <- metadata
//  - SealedKey <- metadata
//  - KeyEncKey := HMAC-SHA256(MasterKey, IV || bucket || object)
//  - KeyEncKey := HMAC-SHA256(MasterKey, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
//  - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
//  - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
@@ -92,7 +92,7 @@
//  - Key, EncKey := Generate(KeyID)
//  - IV := Random({0,1}²⁵⁶)
//  - ObjectKey := SHA256(Key, Random({0,1}²⁵⁶))
//  - KeyEncKey := HMAC-SHA256(Key, IV || bucket || object)
//  - KeyEncKey := HMAC-SHA256(Key, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
//  - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
//  - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
//  - metadata <- IV
@@ -108,7 +108,7 @@
//  - IV <- metadata
//  - SealedKey <- metadata
//  - Key := Unseal(KeyID, EncKey)
//  - KeyEncKey := HMAC-SHA256(Key, IV || bucket || object)
//  - KeyEncKey := HMAC-SHA256(Key, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
//  - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
//  - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
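
The revised derivation mixes fixed domain-separation labels ('SSE-C' or 'SSE-S3', plus the DARE scheme name) and a '/'-joined bucket/object path into the HMAC input, so a key-encryption key derived for one scheme or object path cannot collide with one derived for another. A minimal sketch of the new KeyEncKey computation using only the standard library (the variable names and the zero test inputs are illustrative):

    package main

    import (
        "crypto/hmac"
        "crypto/sha256"
        "fmt"
    )

    // deriveKeyEncKey computes
    //   HMAC-SHA256(key, iv || domain || scheme || bucket || '/' || object)
    // matching the derivation described in the updated comments above.
    func deriveKeyEncKey(key, iv []byte, domain, scheme, bucket, object string) []byte {
        mac := hmac.New(sha256.New, key)
        mac.Write(iv)
        mac.Write([]byte(domain)) // e.g. "SSE-C" or "SSE-S3"
        mac.Write([]byte(scheme)) // e.g. "DAREv2-HMAC-SHA256"
        mac.Write([]byte(bucket))
        mac.Write([]byte{'/'})
        mac.Write([]byte(object))
        return mac.Sum(nil)
    }

    func main() {
        key := make([]byte, 32) // all-zero key, for illustration only
        iv := make([]byte, 32)  // all-zero IV, for illustration only
        kek := deriveKeyEncKey(key, iv, "SSE-C", "DAREv2-HMAC-SHA256", "bucket", "object")
        fmt.Printf("%x\n", kek)
    }
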

@@ -16,8 +16,56 @@ package crypto

import "errors"

// Error is the generic type for any error happening while decrypting
// an object. It indicates that the object itself or its metadata was
// modified accidentally or maliciously.
type Error struct{ msg string }

func (e Error) Error() string { return e.msg }

var (
    // ErrInvalidEncryptionMethod indicates that the specified SSE encryption method
    // is not supported.
    ErrInvalidEncryptionMethod = errors.New("The encryption method is not supported")

    // ErrInvalidCustomerAlgorithm indicates that the specified SSE-C algorithm
    // is not supported.
    ErrInvalidCustomerAlgorithm = errors.New("The SSE-C algorithm is not supported")

    // ErrMissingCustomerKey indicates that the HTTP headers contain no SSE-C client key.
    ErrMissingCustomerKey = errors.New("The SSE-C request is missing the customer key")

    // ErrMissingCustomerKeyMD5 indicates that the HTTP headers contain no SSE-C client key
    // MD5 checksum.
    ErrMissingCustomerKeyMD5 = errors.New("The SSE-C request is missing the customer key MD5")

    // ErrInvalidCustomerKey indicates that the SSE-C client key is not valid - e.g. not a
    // base64-encoded string or not 256 bits long.
    ErrInvalidCustomerKey = errors.New("The SSE-C client key is invalid")

    // ErrSecretKeyMismatch indicates that the provided secret key (SSE-C client key / SSE-S3 KMS key)
    // does not match the secret key used during encrypting the object.
    ErrSecretKeyMismatch = errors.New("The secret key does not match the secret key used during upload")

    // ErrCustomerKeyMD5Mismatch indicates that the SSE-C key MD5 does not match the
    // computed MD5 sum. This means that the client provided either the wrong key for
    // a certain MD5 checksum or the wrong MD5 for a certain key.
    ErrCustomerKeyMD5Mismatch = errors.New("The provided SSE-C key MD5 does not match the computed MD5 of the SSE-C key")
    // ErrIncompatibleEncryptionMethod indicates that both SSE-C headers and SSE-S3 headers were specified, and are incompatible.
    // The client needs to remove the SSE-S3 header or the SSE-C headers.
    ErrIncompatibleEncryptionMethod = errors.New("Server side encryption specified with both SSE-C and SSE-S3 headers")
)

var (
    errMissingInternalIV            = Error{"The object metadata is missing the internal encryption IV"}
    errMissingInternalSealAlgorithm = Error{"The object metadata is missing the internal seal algorithm"}

    errInvalidInternalIV            = Error{"The internal encryption IV is malformed"}
    errInvalidInternalSealAlgorithm = Error{"The internal seal algorithm is invalid and not supported"}
)

var (
    // errOutOfEntropy indicates that a source of randomness (PRNG) wasn't able
    // to produce enough random data. This is a fatal error and should cause a panic.
    errOutOfEntropy = errors.New("Unable to read enough randomness from the system")
)

@@ -15,16 +15,72 @@
package crypto

import (
    "bytes"
    "crypto/md5"
    "encoding/base64"
    "net/http"
    "strings"
)

// SSEHeader is the general AWS SSE HTTP header key.
const SSEHeader = "X-Amz-Server-Side-Encryption"

// SSEAlgorithmAES256 is the only supported value for the SSE-S3 or SSE-C algorithm header.
// For SSE-S3 see: https://docs.aws.amazon.com/AmazonS3/latest/dev/SSEUsingRESTAPI.html
// For SSE-C see: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
const SSEAlgorithmAES256 = "AES256"
const (
    // SSEKmsID is the HTTP header key referencing the SSE-KMS
    // key ID.
    SSEKmsID = SSEHeader + "-Aws-Kms-Key-Id"

    // SSEKmsContext is the HTTP header key referencing the
    // SSE-KMS encryption context.
    SSEKmsContext = SSEHeader + "-Context"
)

const (
    // SSECAlgorithm is the HTTP header key referencing
    // the SSE-C algorithm.
    SSECAlgorithm = SSEHeader + "-Customer-Algorithm"

    // SSECKey is the HTTP header key referencing the
    // SSE-C client-provided key.
    SSECKey = SSEHeader + "-Customer-Key"

    // SSECKeyMD5 is the HTTP header key referencing
    // the MD5 sum of the client-provided key.
    SSECKeyMD5 = SSEHeader + "-Customer-Key-Md5"
)

const (
    // SSECopyAlgorithm is the HTTP header key referencing
    // the SSE-C algorithm for SSE-C copy requests.
    SSECopyAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"

    // SSECopyKey is the HTTP header key referencing the SSE-C
    // client-provided key for SSE-C copy requests.
    SSECopyKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"

    // SSECopyKeyMD5 is the HTTP header key referencing the
    // MD5 sum of the client key for SSE-C copy requests.
    SSECopyKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5"
)

const (
    // SSEAlgorithmAES256 is the only supported value for the SSE-S3 or SSE-C algorithm header.
    // For SSE-S3 see: https://docs.aws.amazon.com/AmazonS3/latest/dev/SSEUsingRESTAPI.html
    // For SSE-C see: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
    SSEAlgorithmAES256 = "AES256"

    // SSEAlgorithmKMS is the value of 'X-Amz-Server-Side-Encryption' for SSE-KMS.
    // See: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html
    SSEAlgorithmKMS = "aws:kms"
)

// RemoveSensitiveHeaders removes confidential encryption
// information - e.g. the SSE-C key - from the HTTP headers.
// It has the same semantics as RemoveSensitiveEntries.
func RemoveSensitiveHeaders(h http.Header) {
    h.Del(SSECKey)
    h.Del(SSECopyKey)
}

// S3 represents AWS SSE-S3. It provides functionality to handle
// SSE-S3 requests.
@@ -36,14 +92,129 @@ type s3 struct{}
// the S3 client requests SSE-S3.
func (s3) IsRequested(h http.Header) bool {
    _, ok := h[SSEHeader]
    return ok
    return ok && strings.ToLower(h.Get(SSEHeader)) != SSEAlgorithmKMS // Return true only if the SSE header is specified and does not contain the SSE-KMS value
}

// Parse parses the SSE-S3 related HTTP headers and checks
// ParseHTTP parses the SSE-S3 related HTTP headers and checks
// whether they contain valid values.
func (s3) Parse(h http.Header) (err error) {
func (s3) ParseHTTP(h http.Header) (err error) {
    if h.Get(SSEHeader) != SSEAlgorithmAES256 {
        err = ErrInvalidEncryptionMethod
    }
    return
}

// S3KMS represents AWS SSE-KMS. It provides functionality to
// handle SSE-KMS requests.
var S3KMS = s3KMS{}

type s3KMS struct{}

// IsRequested returns true if the HTTP headers indicate that
// the S3 client requests SSE-KMS.
func (s3KMS) IsRequested(h http.Header) bool {
    if _, ok := h[SSEKmsID]; ok {
        return true
    }
    if _, ok := h[SSEKmsContext]; ok {
        return true
    }
    if _, ok := h[SSEHeader]; ok {
        return strings.ToUpper(h.Get(SSEHeader)) != SSEAlgorithmAES256 // Return true only if the SSE header is specified and does not contain the SSE-S3 value
    }
    return false
}

var (
    // SSEC represents AWS SSE-C. It provides functionality to handle
    // SSE-C requests.
    SSEC = ssec{}

    // SSECopy represents AWS SSE-C for copy requests. It provides
    // functionality to handle SSE-C copy requests.
    SSECopy = ssecCopy{}
)

type ssec struct{}
type ssecCopy struct{}

// IsRequested returns true if the HTTP headers contain
// at least one SSE-C header. SSE-C copy headers are ignored.
func (ssec) IsRequested(h http.Header) bool {
    if _, ok := h[SSECAlgorithm]; ok {
        return true
    }
    if _, ok := h[SSECKey]; ok {
        return true
    }
    if _, ok := h[SSECKeyMD5]; ok {
        return true
    }
    return false
}

// IsRequested returns true if the HTTP headers contain
// at least one SSE-C copy header. Regular SSE-C headers
// are ignored.
func (ssecCopy) IsRequested(h http.Header) bool {
    if _, ok := h[SSECopyAlgorithm]; ok {
        return true
    }
    if _, ok := h[SSECopyKey]; ok {
        return true
    }
    if _, ok := h[SSECopyKeyMD5]; ok {
        return true
    }
    return false
}

// ParseHTTP parses the SSE-C headers and returns the SSE-C client key
// on success. SSE-C copy headers are ignored.
func (ssec) ParseHTTP(h http.Header) (key [32]byte, err error) {
    if h.Get(SSECAlgorithm) != SSEAlgorithmAES256 {
        return key, ErrInvalidCustomerAlgorithm
    }
    if h.Get(SSECKey) == "" {
        return key, ErrMissingCustomerKey
    }
    if h.Get(SSECKeyMD5) == "" {
        return key, ErrMissingCustomerKeyMD5
    }

    clientKey, err := base64.StdEncoding.DecodeString(h.Get(SSECKey))
    if err != nil || len(clientKey) != 32 { // The client key must be 256 bits long
        return key, ErrInvalidCustomerKey
    }
    keyMD5, err := base64.StdEncoding.DecodeString(h.Get(SSECKeyMD5))
    if md5Sum := md5.Sum(clientKey); err != nil || !bytes.Equal(md5Sum[:], keyMD5) {
        return key, ErrCustomerKeyMD5Mismatch
    }
    copy(key[:], clientKey)
    return key, nil
}
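
A client that wants ssec.ParseHTTP to accept its request must send the AES256 algorithm value, the base64-encoded 256-bit key, and the base64-encoded MD5 of the raw key. A short sketch of producing a matching header set; the random key source and helper name are illustrative:

    package main

    import (
        "crypto/md5"
        "crypto/rand"
        "encoding/base64"
        "net/http"
    )

    // sseCHeaders builds the three SSE-C headers that ssec.ParseHTTP expects:
    // the AES256 algorithm, the base64 key, and the base64 MD5 of the raw key.
    func sseCHeaders(key [32]byte) http.Header {
        sum := md5.Sum(key[:])
        h := http.Header{}
        h.Set("X-Amz-Server-Side-Encryption-Customer-Algorithm", "AES256")
        h.Set("X-Amz-Server-Side-Encryption-Customer-Key", base64.StdEncoding.EncodeToString(key[:]))
        h.Set("X-Amz-Server-Side-Encryption-Customer-Key-Md5", base64.StdEncoding.EncodeToString(sum[:]))
        return h
    }

    func main() {
        var key [32]byte
        if _, err := rand.Read(key[:]); err != nil {
            panic(err)
        }
        _ = sseCHeaders(key) // Would pass the checks in ssec.ParseHTTP above.
    }
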
|
||||
|
||||
// ParseHTTP parses the SSE-C copy headers and returns the SSE-C client key
|
||||
// on success. Regular SSE-C headers are ignored.
|
||||
func (ssecCopy) ParseHTTP(h http.Header) (key [32]byte, err error) {
|
||||
if h.Get(SSECopyAlgorithm) != SSEAlgorithmAES256 {
|
||||
return key, ErrInvalidCustomerAlgorithm
|
||||
}
|
||||
if h.Get(SSECopyKey) == "" {
|
||||
return key, ErrMissingCustomerKey
|
||||
}
|
||||
if h.Get(SSECopyKeyMD5) == "" {
|
||||
return key, ErrMissingCustomerKeyMD5
|
||||
}
|
||||
|
||||
clientKey, err := base64.StdEncoding.DecodeString(h.Get(SSECopyKey))
|
||||
if err != nil || len(clientKey) != 32 { // The client key must be 256 bits long
|
||||
return key, ErrInvalidCustomerKey
|
||||
}
|
||||
keyMD5, err := base64.StdEncoding.DecodeString(h.Get(SSECopyKeyMD5))
|
||||
if md5Sum := md5.Sum(clientKey); err != nil || !bytes.Equal(md5Sum[:], keyMD5) {
|
||||
return key, ErrCustomerKeyMD5Mismatch
|
||||
}
|
||||
copy(key[:], clientKey)
|
||||
return key, nil
|
||||
}
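The ParseHTTP functions above expect the client key and its MD5 digest base64-encoded. A minimal sketch of building and parsing such a request, assuming the package import path; the all-zero key is for illustration only:

package main

import (
    "crypto/md5"
    "encoding/base64"
    "fmt"
    "net/http"

    "github.com/minio/minio/cmd/crypto"
)

func main() {
    key := make([]byte, 32) // the 256 bit SSE-C client key (all zero here for brevity)
    sum := md5.Sum(key)

    h := http.Header{}
    h.Set("X-Amz-Server-Side-Encryption-Customer-Algorithm", "AES256")
    h.Set("X-Amz-Server-Side-Encryption-Customer-Key", base64.StdEncoding.EncodeToString(key))
    h.Set("X-Amz-Server-Side-Encryption-Customer-Key-Md5", base64.StdEncoding.EncodeToString(sum[:]))

    clientKey, err := crypto.SSEC.ParseHTTP(h)
    fmt.Println(clientKey, err) // the decoded 32 byte key and <nil>
}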

@@ -16,28 +16,64 @@ package crypto

import (
    "net/http"
    "sort"
    "testing"
)

var isRequestedTests = []struct {
var kmsIsRequestedTests = []struct {
    Header   http.Header
    Expected bool
}{
    {Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES256"}}, Expected: true},  // 0
    {Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES-256"}}, Expected: true}, // 1
    {Header: http.Header{"X-Amz-Server-Side-Encryption": []string{""}}, Expected: true},        // 2
    {Header: http.Header{"X-Amz-Server-Side-Encryptio": []string{"AES256"}}, Expected: false},  // 3
    {Header: http.Header{}, Expected: false},                                                   // 0
    {Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"aws:kms"}}, Expected: true}, // 1
    {Header: http.Header{"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{"0839-9047947-844842874-481"}}, Expected: true}, // 2
    {Header: http.Header{"X-Amz-Server-Side-Encryption-Context": []string{"7PpPLAK26ONlVUGOWlusfg=="}}, Expected: true},          // 3
    {
        Header: http.Header{
            "X-Amz-Server-Side-Encryption":                []string{""},
            "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{""},
            "X-Amz-Server-Side-Encryption-Context":        []string{""},
        },
        Expected: true,
    }, // 4
    {
        Header: http.Header{
            "X-Amz-Server-Side-Encryption":                []string{"AES256"},
            "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{""},
        },
        Expected: true,
    }, // 5
    {Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES256"}}, Expected: false}, // 6
}

func TestKMSIsRequested(t *testing.T) {
    for i, test := range kmsIsRequestedTests {
        if got := S3KMS.IsRequested(test.Header); got != test.Expected {
            t.Errorf("Test %d: Wanted %v but got %v", i, test.Expected, got)
        }
    }
}

var s3IsRequestedTests = []struct {
    Header   http.Header
    Expected bool
}{
    {Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES256"}}, Expected: true},         // 0
    {Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES-256"}}, Expected: true},        // 1
    {Header: http.Header{"X-Amz-Server-Side-Encryption": []string{""}}, Expected: true},               // 2
    {Header: http.Header{"X-Amz-Server-Side-Encryptio": []string{"AES256"}}, Expected: false},         // 3
    {Header: http.Header{"X-Amz-Server-Side-Encryption": []string{SSEAlgorithmKMS}}, Expected: false}, // 4
}

func TestS3IsRequested(t *testing.T) {
    for i, test := range isRequestedTests {
    for i, test := range s3IsRequestedTests {
        if got := S3.IsRequested(test.Header); got != test.Expected {
            t.Errorf("Test %d: Wanted %v but got %v", i, test.Expected, got)
        }
    }
}

var parseTests = []struct {
var s3ParseTests = []struct {
    Header      http.Header
    ExpectedErr error
}{
@@ -48,9 +84,353 @@ var parseTests = []struct {
}

func TestS3Parse(t *testing.T) {
    for i, test := range parseTests {
        if err := S3.Parse(test.Header); err != test.ExpectedErr {
    for i, test := range s3ParseTests {
        if err := S3.ParseHTTP(test.Header); err != test.ExpectedErr {
            t.Errorf("Test %d: Wanted '%v' but got '%v'", i, test.ExpectedErr, err)
        }
    }
}

var ssecIsRequestedTests = []struct {
    Header   http.Header
    Expected bool
}{
    {Header: http.Header{}, Expected: false},                                                                                                     // 0
    {Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"}}, Expected: true},                                 // 1
    {Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}}, Expected: true}, // 2
    {Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="}}, Expected: true},                 // 3
    {
        Header: http.Header{
            "X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{""},
            "X-Amz-Server-Side-Encryption-Customer-Key":       []string{""},
            "X-Amz-Server-Side-Encryption-Customer-Key-Md5":   []string{""},
        },
        Expected: true,
    }, // 4
    {
        Header: http.Header{
            "X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        Expected: true,
    }, // 5
    {
        Header: http.Header{
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        Expected: false,
    }, // 6
}

func TestSSECIsRequested(t *testing.T) {
    for i, test := range ssecIsRequestedTests {
        if got := SSEC.IsRequested(test.Header); got != test.Expected {
            t.Errorf("Test %d: Wanted %v but got %v", i, test.Expected, got)
        }
    }
}

var ssecCopyIsRequestedTests = []struct {
    Header   http.Header
    Expected bool
}{
    {Header: http.Header{}, Expected: false},                                                                                                                 // 0
    {Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"}}, Expected: true},                                 // 1
    {Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}}, Expected: true}, // 2
    {Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="}}, Expected: true},                 // 3
    {
        Header: http.Header{
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{""},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       []string{""},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   []string{""},
        },
        Expected: true,
    }, // 4
    {
        Header: http.Header{
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        Expected: true,
    }, // 5
    {
        Header: http.Header{
            "X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        Expected: false,
    }, // 6
}

func TestSSECopyIsRequested(t *testing.T) {
    for i, test := range ssecCopyIsRequestedTests {
        if got := SSECopy.IsRequested(test.Header); got != test.Expected {
            t.Errorf("Test %d: Wanted %v but got %v", i, test.Expected, got)
        }
    }
}

var ssecParseTests = []struct {
    Header      http.Header
    ExpectedErr error
}{
    {
        Header: http.Header{
            "X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        ExpectedErr: nil, // 0
    },
    {
        Header: http.Header{
            "X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES-256"}, // invalid algorithm
            "X-Amz-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        ExpectedErr: ErrInvalidCustomerAlgorithm, // 1
    },
    {
        Header: http.Header{
            "X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Server-Side-Encryption-Customer-Key":       []string{""}, // no client key
            "X-Amz-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        ExpectedErr: ErrMissingCustomerKey, // 2
    },
    {
        Header: http.Header{
            "X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRr.ZXltdXN0cHJvdmlkZWQ="}, // invalid key
            "X-Amz-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        ExpectedErr: ErrInvalidCustomerKey, // 3
    },
    {
        Header: http.Header{
            "X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Server-Side-Encryption-Customer-Key-Md5":   []string{""}, // no key MD5
        },
        ExpectedErr: ErrMissingCustomerKeyMD5, // 4
    },
    {
        Header: http.Header{
            "X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Server-Side-Encryption-Customer-Key":       []string{"DzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}, // wrong client key
            "X-Amz-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        ExpectedErr: ErrCustomerKeyMD5Mismatch, // 5
    },
    {
        Header: http.Header{
            "X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Server-Side-Encryption-Customer-Key-Md5":   []string{".7PpPLAK26ONlVUGOWlusfg=="}, // wrong key MD5
        },
        ExpectedErr: ErrCustomerKeyMD5Mismatch, // 6
    },
}

func TestSSECParse(t *testing.T) {
    var zeroKey [32]byte
    for i, test := range ssecParseTests {
        key, err := SSEC.ParseHTTP(test.Header)
        if err != test.ExpectedErr {
            t.Errorf("Test %d: want error '%v' but got '%v'", i, test.ExpectedErr, err)
        }

        if err != nil && key != zeroKey {
            t.Errorf("Test %d: parsing failed and client key is not zero key", i)
        }
        if err == nil && key == zeroKey {
            t.Errorf("Test %d: parsed client key is zero key", i)
        }
    }
}

var ssecCopyParseTests = []struct {
    Header      http.Header
    ExpectedErr error
}{
    {
        Header: http.Header{
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        ExpectedErr: nil, // 0
    },
    {
        Header: http.Header{
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES-256"}, // invalid algorithm
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        ExpectedErr: ErrInvalidCustomerAlgorithm, // 1
    },
    {
        Header: http.Header{
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       []string{""}, // no client key
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        ExpectedErr: ErrMissingCustomerKey, // 2
    },
    {
        Header: http.Header{
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRr.ZXltdXN0cHJvdmlkZWQ="}, // invalid key
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        ExpectedErr: ErrInvalidCustomerKey, // 3
    },
    {
        Header: http.Header{
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   []string{""}, // no key MD5
        },
        ExpectedErr: ErrMissingCustomerKeyMD5, // 4
    },
    {
        Header: http.Header{
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       []string{"DzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}, // wrong client key
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        ExpectedErr: ErrCustomerKeyMD5Mismatch, // 5
    },
    {
        Header: http.Header{
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   []string{".7PpPLAK26ONlVUGOWlusfg=="}, // wrong key MD5
        },
        ExpectedErr: ErrCustomerKeyMD5Mismatch, // 6
    },
}

func TestSSECopyParse(t *testing.T) {
    var zeroKey [32]byte
    for i, test := range ssecCopyParseTests {
        key, err := SSECopy.ParseHTTP(test.Header)
        if err != test.ExpectedErr {
            t.Errorf("Test %d: want error '%v' but got '%v'", i, test.ExpectedErr, err)
        }

        if err != nil && key != zeroKey {
            t.Errorf("Test %d: parsing failed and client key is not zero key", i)
        }
        if err == nil && key == zeroKey {
            t.Errorf("Test %d: parsed client key is zero key", i)
        }
        if _, ok := test.Header[SSECKey]; ok {
            t.Errorf("Test %d: client key is not removed from HTTP headers after parsing", i)
        }
    }
}

var removeSensitiveHeadersTests = []struct {
    Header, ExpectedHeader http.Header
}{
    {
        Header: http.Header{
            SSECKey:    []string{""},
            SSECopyKey: []string{""},
        },
        ExpectedHeader: http.Header{},
    },
    { // Standard SSE-C request headers
        Header: http.Header{
            SSECAlgorithm: []string{SSEAlgorithmAES256},
            SSECKey:       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            SSECKeyMD5:    []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        ExpectedHeader: http.Header{
            SSECAlgorithm: []string{SSEAlgorithmAES256},
            SSECKeyMD5:    []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
    },
    { // Standard SSE-C + SSE-C-copy request headers
        Header: http.Header{
            SSECAlgorithm: []string{SSEAlgorithmAES256},
            SSECKey:       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            SSECKeyMD5:    []string{"7PpPLAK26ONlVUGOWlusfg=="},
            SSECopyKey:    []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            SSECopyKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        ExpectedHeader: http.Header{
            SSECAlgorithm: []string{SSEAlgorithmAES256},
            SSECKeyMD5:    []string{"7PpPLAK26ONlVUGOWlusfg=="},
            SSECopyKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
    },
    { // Standard SSE-C + metadata request headers
        Header: http.Header{
            SSECAlgorithm:       []string{SSEAlgorithmAES256},
            SSECKey:             []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            SSECKeyMD5:          []string{"7PpPLAK26ONlVUGOWlusfg=="},
            "X-Amz-Meta-Test-1": []string{"Test-1"},
        },
        ExpectedHeader: http.Header{
            SSECAlgorithm:       []string{SSEAlgorithmAES256},
            SSECKeyMD5:          []string{"7PpPLAK26ONlVUGOWlusfg=="},
            "X-Amz-Meta-Test-1": []string{"Test-1"},
        },
    },
}

func TestRemoveSensitiveHeaders(t *testing.T) {
    isEqual := func(x, y http.Header) bool {
        if len(x) != len(y) {
            return false
        }
        for k, v := range x {
            u, ok := y[k]
            if !ok || len(v) != len(u) {
                return false
            }
            sort.Strings(v)
            sort.Strings(u)
            for j := range v {
                if v[j] != u[j] {
                    return false
                }
            }
        }
        return true
    }
    areKeysEqual := func(h http.Header, metadata map[string]string) bool {
        if len(h) != len(metadata) {
            return false
        }
        for k := range h {
            if _, ok := metadata[k]; !ok {
                return false
            }
        }
        return true
    }

    for i, test := range removeSensitiveHeadersTests {
        metadata := make(map[string]string, len(test.Header))
        for k := range test.Header {
            metadata[k] = "" // set metadata key - we don't care about the value
        }

        RemoveSensitiveHeaders(test.Header)
        if !isEqual(test.ExpectedHeader, test.Header) {
            t.Errorf("Test %d: filtered headers do not match expected headers - got: %v , want: %v", i, test.Header, test.ExpectedHeader)
        }
        RemoveSensitiveEntries(metadata)
        if !areKeysEqual(test.ExpectedHeader, metadata) {
            t.Errorf("Test %d: filtered headers do not match expected headers - got: %v , want: %v", i, test.Header, test.ExpectedHeader)
        }
    }
}
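The table above encodes the contract: only the raw keys are stripped, while the algorithm and key-MD5 headers survive. A minimal usage sketch of RemoveSensitiveHeaders under that assumption (import path assumed):

package main

import (
    "fmt"
    "net/http"

    "github.com/minio/minio/cmd/crypto"
)

func main() {
    h := http.Header{}
    h.Set("X-Amz-Server-Side-Encryption-Customer-Algorithm", "AES256")
    h.Set("X-Amz-Server-Side-Encryption-Customer-Key", "MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ=")
    h.Set("X-Amz-Server-Side-Encryption-Customer-Key-Md5", "7PpPLAK26ONlVUGOWlusfg==")

    crypto.RemoveSensitiveHeaders(h) // drops the key, keeps algorithm and key-MD5
    _, hasKey := h["X-Amz-Server-Side-Encryption-Customer-Key"]
    fmt.Println(hasKey) // false
}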

@@ -21,8 +21,9 @@ import (
    "crypto/rand"
    "encoding/binary"
    "errors"
    "fmt"
    "io"
    "path/filepath"
    "path"

    "github.com/minio/minio/cmd/logger"
    sha256 "github.com/minio/sha256-simd"
@@ -34,50 +35,98 @@ import (
type ObjectKey [32]byte

// GenerateKey generates a unique ObjectKey from a 256 bit external key
// and a source of randomness. If random is nil the default PRNG of system
// (crypto/rand) is used.
// and a source of randomness. If random is nil the default PRNG of the
// system (crypto/rand) is used.
func GenerateKey(extKey [32]byte, random io.Reader) (key ObjectKey) {
    if random == nil {
        random = rand.Reader
    }
    var nonce [32]byte
    if _, err := io.ReadFull(random, nonce[:]); err != nil {
        logger.CriticalIf(context.Background(), errors.New("Unable to read enough randomness from the system"))
        logger.CriticalIf(context.Background(), errOutOfEntropy)
    }
    sha := sha256.New()
    sha.Write(extKey[:])
    sha.Write(nonce[:])
    sha.Sum(key[:0])
    return
    return key
}

// GenerateIV generates a new random 256 bit IV from the provided source
// of randomness. If random is nil the default PRNG of the system
// (crypto/rand) is used.
func GenerateIV(random io.Reader) (iv [32]byte) {
    if random == nil {
        random = rand.Reader
    }
    if _, err := io.ReadFull(random, iv[:]); err != nil {
        logger.CriticalIf(context.Background(), errOutOfEntropy)
    }
    return iv
}
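A minimal sketch of the key/IV generation API above; the import path is assumed and a real deployment would use an actual 256 bit secret as the external key:

package main

import (
    "fmt"

    "github.com/minio/minio/cmd/crypto"
)

func main() {
    var extKey [32]byte // placeholder external key, all zero for illustration

    objectKey := crypto.GenerateKey(extKey, nil) // nil random falls back to crypto/rand
    iv := crypto.GenerateIV(nil)

    fmt.Printf("object key: %x\niv: %x\n", objectKey, iv)
}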

// SealedKey represents a sealed object key. It can be stored
// at an untrusted location.
type SealedKey struct {
    Key       [64]byte // The encrypted and authenticated object-key.
    IV        [32]byte // The random IV used to encrypt the object-key.
    Algorithm string   // The sealing algorithm used to encrypt the object key.
}

// Seal encrypts the ObjectKey using the 256 bit external key and IV. The sealed
// key is also cryptographically bound to the object's path (bucket/object).
func (key ObjectKey) Seal(extKey, iv [32]byte, bucket, object string) []byte {
    var sealedKey bytes.Buffer
// key is also cryptographically bound to the object's path (bucket/object) and the
// domain (SSE-C or SSE-S3).
func (key ObjectKey) Seal(extKey, iv [32]byte, domain, bucket, object string) SealedKey {
    var (
        sealingKey   [32]byte
        encryptedKey bytes.Buffer
    )
    mac := hmac.New(sha256.New, extKey[:])
    mac.Write(iv[:])
    mac.Write([]byte(filepath.Join(bucket, object)))

    if n, err := sio.Encrypt(&sealedKey, bytes.NewReader(key[:]), sio.Config{Key: mac.Sum(nil)}); n != 64 || err != nil {
    mac.Write([]byte(domain))
    mac.Write([]byte(SealAlgorithm))
    mac.Write([]byte(path.Join(bucket, object))) // use path.Join for canonical 'bucket/object'
    mac.Sum(sealingKey[:0])
    if n, err := sio.Encrypt(&encryptedKey, bytes.NewReader(key[:]), sio.Config{Key: sealingKey[:]}); n != 64 || err != nil {
        logger.CriticalIf(context.Background(), errors.New("Unable to generate sealed key"))
    }
    return sealedKey.Bytes()
    sealedKey := SealedKey{
        IV:        iv,
        Algorithm: SealAlgorithm,
    }
    copy(sealedKey.Key[:], encryptedKey.Bytes())
    return sealedKey
}

// Unseal decrypts a sealed key using the 256 bit external key and IV. Since the sealed key
// is cryptographically bound to the object's path the same bucket/object as during sealing
// Unseal decrypts a sealed key using the 256 bit external key. Since the sealed key
// may be cryptographically bound to the object's path the same bucket/object as during sealing
// must be provided. On success the ObjectKey contains the decrypted sealed key.
func (key *ObjectKey) Unseal(sealedKey []byte, extKey, iv [32]byte, bucket, object string) error {
    var unsealedKey bytes.Buffer
    mac := hmac.New(sha256.New, extKey[:])
    mac.Write(iv[:])
    mac.Write([]byte(filepath.Join(bucket, object)))

    if n, err := sio.Decrypt(&unsealedKey, bytes.NewReader(sealedKey), sio.Config{Key: mac.Sum(nil)}); n != 32 || err != nil {
        return err // TODO(aead): upgrade sio to use sio.Error
func (key *ObjectKey) Unseal(extKey [32]byte, sealedKey SealedKey, domain, bucket, object string) error {
    var (
        unsealConfig sio.Config
        decryptedKey bytes.Buffer
    )
    switch sealedKey.Algorithm {
    default:
        return Error{fmt.Sprintf("The sealing algorithm '%s' is not supported", sealedKey.Algorithm)}
    case SealAlgorithm:
        mac := hmac.New(sha256.New, extKey[:])
        mac.Write(sealedKey.IV[:])
        mac.Write([]byte(domain))
        mac.Write([]byte(SealAlgorithm))
        mac.Write([]byte(path.Join(bucket, object))) // use path.Join for canonical 'bucket/object'
        unsealConfig = sio.Config{MinVersion: sio.Version20, Key: mac.Sum(nil)}
    case InsecureSealAlgorithm:
        sha := sha256.New()
        sha.Write(extKey[:])
        sha.Write(sealedKey.IV[:])
        unsealConfig = sio.Config{MinVersion: sio.Version10, Key: sha.Sum(nil)}
    }
    copy(key[:], unsealedKey.Bytes())

    if n, err := sio.Decrypt(&decryptedKey, bytes.NewReader(sealedKey.Key[:]), unsealConfig); n != 32 || err != nil {
        return ErrSecretKeyMismatch
    }
    copy(key[:], decryptedKey.Bytes())
    return nil
}
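Seal and Unseal above bind the object key to the sealing domain and the object path. A round-trip sketch under the same assumptions as before; unsealing with a different domain, bucket or object must fail:

package main

import (
    "fmt"

    "github.com/minio/minio/cmd/crypto"
)

func main() {
    var extKey [32]byte // external (e.g. client-provided SSE-C) key, placeholder
    objectKey := crypto.GenerateKey(extKey, nil)
    iv := crypto.GenerateIV(nil)

    sealed := objectKey.Seal(extKey, iv, "SSE-C", "bucket", "object")

    var unsealed crypto.ObjectKey
    if err := unsealed.Unseal(extKey, sealed, "SSE-C", "bucket", "object"); err != nil {
        fmt.Println("unseal failed:", err)
        return
    }
    fmt.Println(unsealed == objectKey) // true
}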

@@ -89,5 +138,39 @@ func (key ObjectKey) DerivePartKey(id uint32) (partKey [32]byte) {
    mac := hmac.New(sha256.New, key[:])
    mac.Write(bin[:])
    mac.Sum(partKey[:0])
    return
    return partKey
}

// SealETag seals the etag using the object key.
// It does not encrypt empty ETags because such ETags indicate
// that the S3 client hasn't sent an ETag = MD5(object) and
// the backend can pick an ETag value.
func (key ObjectKey) SealETag(etag []byte) []byte {
    if len(etag) == 0 { // don't encrypt empty ETag - only if client sent ETag = MD5(object)
        return etag
    }
    var buffer bytes.Buffer
    mac := hmac.New(sha256.New, key[:])
    mac.Write([]byte("SSE-etag"))
    if _, err := sio.Encrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil)}); err != nil {
        logger.CriticalIf(context.Background(), errors.New("Unable to encrypt ETag using object key"))
    }
    return buffer.Bytes()
}

// UnsealETag unseals the etag using the provided object key.
// It does not try to decrypt the ETag if len(etag) == 16
// because such ETags indicate that the S3 client hasn't sent
// an ETag = MD5(object) and the backend has picked an ETag value.
func (key ObjectKey) UnsealETag(etag []byte) ([]byte, error) {
    if !IsETagSealed(etag) {
        return etag, nil
    }
    var buffer bytes.Buffer
    mac := hmac.New(sha256.New, key[:])
    mac.Write([]byte("SSE-etag"))
    if _, err := sio.Decrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil)}); err != nil {
        return nil, err
    }
    return buffer.Bytes(), nil
}
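A small round-trip sketch for the ETag sealing described above; note that IsETagSealed (defined later in this changeset) distinguishes sealed from plain ETags by length, and the import path is assumed:

package main

import (
    "bytes"
    "encoding/hex"
    "fmt"

    "github.com/minio/minio/cmd/crypto"
)

func main() {
    var key crypto.ObjectKey // all-zero object key, for illustration only
    etag, _ := hex.DecodeString("90682b8e8cc7609c4671e1d64c73fc30")

    sealed := key.SealETag(etag)
    fmt.Println(crypto.IsETagSealed(sealed)) // true - sealed ETags are longer than 16 bytes

    plain, err := key.UnsealETag(sealed)
    fmt.Println(err == nil && bytes.Equal(plain, etag)) // true
}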

@@ -20,6 +20,8 @@ import (
    "encoding/hex"
    "io"
    "testing"

    "github.com/minio/minio/cmd/logger"
)

var shortRandom = func(limit int64) io.Reader { return io.LimitReader(rand.Reader, limit) }
@@ -37,14 +39,18 @@ var generateKeyTests = []struct {
    Random     io.Reader
    ShouldPass bool
}{
    {ExtKey: [32]byte{}, Random: nil, ShouldPass: true},             // 0
    {ExtKey: [32]byte{}, Random: rand.Reader, ShouldPass: true},     // 1
    {ExtKey: [32]byte{}, Random: shortRandom(32), ShouldPass: true}, // 2
    // {ExtKey: [32]byte{}, Random: shortRandom(31), ShouldPass: false}, // 3 See: https://github.com/minio/minio/issues/6064
    {ExtKey: [32]byte{}, Random: nil, ShouldPass: true},              // 0
    {ExtKey: [32]byte{}, Random: rand.Reader, ShouldPass: true},      // 1
    {ExtKey: [32]byte{}, Random: shortRandom(32), ShouldPass: true},  // 2
    {ExtKey: [32]byte{}, Random: shortRandom(31), ShouldPass: false}, // 3
}

func TestGenerateKey(t *testing.T) {
    defer func(disableLog bool) { logger.Disable = disableLog }(logger.Disable)
    logger.Disable = true

    for i, test := range generateKeyTests {
        i, test := i, test
        func() {
            defer recoverTest(i, test.ShouldPass, t)
            key := GenerateKey(test.ExtKey, test.Random)
@@ -55,38 +61,64 @@ func TestGenerateKey(t *testing.T) {
    }
}

var sealUnsealKeyTests = []struct {
    SealExtKey, SealIV     [32]byte
    SealBucket, SealObject string
var generateIVTests = []struct {
    Random     io.Reader
    ShouldPass bool
}{
    {Random: nil, ShouldPass: true},              // 0
    {Random: rand.Reader, ShouldPass: true},      // 1
    {Random: shortRandom(32), ShouldPass: true},  // 2
    {Random: shortRandom(31), ShouldPass: false}, // 3
}

    UnsealExtKey, UnsealIV     [32]byte
    UnsealBucket, UnsealObject string
func TestGenerateIV(t *testing.T) {
    defer func(disableLog bool) { logger.Disable = disableLog }(logger.Disable)
    logger.Disable = true

    for i, test := range generateIVTests {
        i, test := i, test
        func() {
            defer recoverTest(i, test.ShouldPass, t)
            iv := GenerateIV(test.Random)
            if iv == [32]byte{} {
                t.Errorf("Test %d: generated IV is zero IV", i) // check that we generate random and unique IV
            }
        }()
    }
}

var sealUnsealKeyTests = []struct {
    SealExtKey, SealIV                 [32]byte
    SealDomain, SealBucket, SealObject string

    UnsealExtKey                             [32]byte
    UnsealDomain, UnsealBucket, UnsealObject string

    ShouldPass bool
}{
    {
        SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealBucket: "bucket", SealObject: "object",
        UnsealExtKey: [32]byte{}, UnsealIV: [32]byte{}, UnsealBucket: "bucket", UnsealObject: "object",
        SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealDomain: "SSE-C", SealBucket: "bucket", SealObject: "object",
        UnsealExtKey: [32]byte{}, UnsealDomain: "SSE-C", UnsealBucket: "bucket", UnsealObject: "object",
        ShouldPass: true,
    }, // 0
    {
        SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealBucket: "bucket", SealObject: "object",
        UnsealExtKey: [32]byte{1}, UnsealIV: [32]byte{0}, UnsealBucket: "bucket", UnsealObject: "object",
        SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealDomain: "SSE-C", SealBucket: "bucket", SealObject: "object",
        UnsealExtKey: [32]byte{1}, UnsealDomain: "SSE-C", UnsealBucket: "bucket", UnsealObject: "object", // different ext-key
        ShouldPass: false,
    }, // 1
    {
        SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealBucket: "bucket", SealObject: "object",
        UnsealExtKey: [32]byte{}, UnsealIV: [32]byte{1}, UnsealBucket: "bucket", UnsealObject: "object",
        SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealDomain: "SSE-S3", SealBucket: "bucket", SealObject: "object",
        UnsealExtKey: [32]byte{}, UnsealDomain: "SSE-C", UnsealBucket: "bucket", UnsealObject: "object", // different domain
        ShouldPass: false,
    }, // 2
    {
        SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealBucket: "bucket", SealObject: "object",
        UnsealExtKey: [32]byte{}, UnsealIV: [32]byte{}, UnsealBucket: "Bucket", UnsealObject: "object",
        SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealDomain: "SSE-C", SealBucket: "bucket", SealObject: "object",
        UnsealExtKey: [32]byte{}, UnsealDomain: "SSE-C", UnsealBucket: "Bucket", UnsealObject: "object", // different bucket
        ShouldPass: false,
    }, // 3
    {
        SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealBucket: "bucket", SealObject: "object",
        UnsealExtKey: [32]byte{}, UnsealIV: [32]byte{}, UnsealBucket: "bucket", UnsealObject: "Object",
        SealExtKey: [32]byte{}, SealIV: [32]byte{}, SealDomain: "SSE-C", SealBucket: "bucket", SealObject: "object",
        UnsealExtKey: [32]byte{}, UnsealDomain: "SSE-C", UnsealBucket: "bucket", UnsealObject: "Object", // different object
        ShouldPass: false,
    }, // 4
}
@@ -94,13 +126,22 @@ var sealUnsealKeyTests = []struct {
func TestSealUnsealKey(t *testing.T) {
    for i, test := range sealUnsealKeyTests {
        key := GenerateKey(test.SealExtKey, rand.Reader)
        sealedKey := key.Seal(test.SealExtKey, test.SealIV, test.SealBucket, test.SealObject)
        if err := key.Unseal(sealedKey, test.UnsealExtKey, test.UnsealIV, test.UnsealBucket, test.UnsealObject); err == nil && !test.ShouldPass {
        sealedKey := key.Seal(test.SealExtKey, test.SealIV, test.SealDomain, test.SealBucket, test.SealObject)
        if err := key.Unseal(test.UnsealExtKey, sealedKey, test.UnsealDomain, test.UnsealBucket, test.UnsealObject); err == nil && !test.ShouldPass {
            t.Errorf("Test %d should fail but passed successfully", i)
        } else if err != nil && test.ShouldPass {
            t.Errorf("Test %d should pass but failed: %v", i, err)
        }
    }

    // Test legacy InsecureSealAlgorithm
    var extKey, iv [32]byte
    key := GenerateKey(extKey, rand.Reader)
    sealedKey := key.Seal(extKey, iv, "SSE-S3", "bucket", "object")
    sealedKey.Algorithm = InsecureSealAlgorithm
    if err := key.Unseal(extKey, sealedKey, "SSE-S3", "bucket", "object"); err == nil {
        t.Errorf("'%s' test succeeded but it should fail because the legacy algorithm was used", sealedKey.Algorithm)
    }
}

var derivePartKeyTest = []struct {
@@ -125,3 +166,31 @@ func TestDerivePartKey(t *testing.T) {
        }
    }
}

var sealUnsealETagTests = []string{
    "",
    "90682b8e8cc7609c",
    "90682b8e8cc7609c4671e1d64c73fc30",
    "90682b8e8cc7609c4671e1d64c73fc307fb3104f",
}

func TestSealETag(t *testing.T) {
    var key ObjectKey
    for i := range key {
        key[i] = byte(i)
    }
    for i, etag := range sealUnsealETagTests {
        tag, err := hex.DecodeString(etag)
        if err != nil {
            t.Errorf("Test %d: failed to decode etag: %s", i, err)
        }
        sealedETag := key.SealETag(tag)
        unsealedETag, err := key.UnsealETag(sealedETag)
        if err != nil {
            t.Errorf("Test %d: failed to decrypt etag: %s", i, err)
        }
        if !bytes.Equal(unsealedETag, tag) {
            t.Errorf("Test %d: unsealed etag does not match: got %s - want %s", i, hex.EncodeToString(unsealedETag), etag)
        }
    }
}

cmd/crypto/kms.go (new file, 138 lines)
@@ -0,0 +1,138 @@
// Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package crypto

import (
    "bytes"
    "context"
    "crypto/hmac"
    "crypto/rand"
    "errors"
    "fmt"
    "io"
    "sort"

    "github.com/minio/minio/cmd/logger"
    sha256 "github.com/minio/sha256-simd"
    "github.com/minio/sio"
)

// Context is a list of key-value pairs cryptographically
// associated with a certain object.
type Context map[string]string

// WriteTo writes the context in a canonical form to w.
// It returns the number of bytes and the first error
// encountered during writing to w, if any.
//
// WriteTo sorts the context keys and writes the sorted
// key-value pairs as a canonical JSON object to w.
func (c Context) WriteTo(w io.Writer) (n int64, err error) {
    sortedKeys := make(sort.StringSlice, 0, len(c))
    for k := range c {
        sortedKeys = append(sortedKeys, k)
    }
    sort.Sort(sortedKeys)

    nn, err := io.WriteString(w, "{")
    if err != nil {
        return n + int64(nn), err
    }
    n += int64(nn)
    for i, k := range sortedKeys {
        s := fmt.Sprintf("\"%s\":\"%s\",", k, c[k])
        if i == len(sortedKeys)-1 {
            s = s[:len(s)-1] // remove last ','
        }

        nn, err = io.WriteString(w, s)
        if err != nil {
            return n + int64(nn), err
        }
        n += int64(nn)
    }
    nn, err = io.WriteString(w, "}")
    return n + int64(nn), err
}
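WriteTo produces the same canonical JSON regardless of map iteration order, which is what makes the context usable as HMAC input in deriveKey below. A sketch, with the import path assumed:

package main

import (
    "fmt"
    "strings"

    "github.com/minio/minio/cmd/crypto"
)

func main() {
    ctx := crypto.Context{"c": "d", "a": "b"}

    var b strings.Builder
    if _, err := ctx.WriteTo(&b); err != nil {
        panic(err)
    }
    fmt.Println(b.String()) // {"a":"b","c":"d"} - keys are sorted before writing
}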

// KMS represents an active and authenticated connection
// to a Key-Management-Service. It supports data key
// generation and unsealing of KMS-generated data keys.
type KMS interface {
    // GenerateKey generates a new random data key using
    // the master key referenced by the keyID. It returns
    // the plaintext key and the sealed plaintext key
    // on success.
    //
    // The context is cryptographically bound to the
    // generated key. The same context must be provided
    // again to unseal the generated key.
    GenerateKey(keyID string, context Context) (key [32]byte, sealedKey []byte, err error)

    // UnsealKey unseals the sealedKey using the master key
    // referenced by the keyID. The provided context must
    // match the context used to generate the sealed key.
    UnsealKey(keyID string, sealedKey []byte, context Context) (key [32]byte, err error)
}

type masterKeyKMS struct {
    masterKey [32]byte
}

// NewKMS returns a basic KMS implementation from a single 256 bit master key.
//
// The KMS accepts any keyID but binds the keyID and context cryptographically
// to the generated keys.
func NewKMS(key [32]byte) KMS { return &masterKeyKMS{masterKey: key} }

func (kms *masterKeyKMS) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
    if _, err = io.ReadFull(rand.Reader, key[:]); err != nil {
        logger.CriticalIf(context.Background(), errOutOfEntropy)
    }

    var (
        buffer     bytes.Buffer
        derivedKey = kms.deriveKey(keyID, ctx)
    )
    if n, err := sio.Encrypt(&buffer, bytes.NewReader(key[:]), sio.Config{Key: derivedKey[:]}); err != nil || n != 64 {
        logger.CriticalIf(context.Background(), errors.New("KMS: unable to encrypt data key"))
    }
    sealedKey = buffer.Bytes()
    return key, sealedKey, nil
}

func (kms *masterKeyKMS) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
    var (
        buffer     bytes.Buffer
        derivedKey = kms.deriveKey(keyID, ctx)
    )
    if n, err := sio.Decrypt(&buffer, bytes.NewReader(sealedKey), sio.Config{Key: derivedKey[:]}); err != nil || n != 32 {
        return key, err // TODO(aead): upgrade sio to use sio.Error
    }
    copy(key[:], buffer.Bytes())
    return key, nil
}

func (kms *masterKeyKMS) deriveKey(keyID string, context Context) (key [32]byte) {
    if context == nil {
        context = Context{}
    }
    mac := hmac.New(sha256.New, kms.masterKey[:])
    mac.Write([]byte(keyID))
    context.WriteTo(mac)
    mac.Sum(key[:0])
    return key
}
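A round-trip sketch of the single-master-key KMS above; the key ID and context values are made up for illustration, and the import path is assumed:

package main

import (
    "bytes"
    "fmt"

    "github.com/minio/minio/cmd/crypto"
)

func main() {
    kms := crypto.NewKMS([32]byte{}) // all-zero master key, for illustration only
    ctx := crypto.Context{"bucket": "bucket/object"}

    key, sealedKey, err := kms.GenerateKey("my-key-id", ctx)
    if err != nil {
        panic(err)
    }

    // Unsealing requires the same keyID and the same context.
    unsealed, err := kms.UnsealKey("my-key-id", sealedKey, ctx)
    fmt.Println(err == nil && bytes.Equal(key[:], unsealed[:])) // true
}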

cmd/crypto/kms_test.go (new file, 84 lines)
@@ -0,0 +1,84 @@
// Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package crypto

import (
    "bytes"
    "path"
    "strings"
    "testing"
)

var masterKeyKMSTests = []struct {
    GenKeyID, UnsealKeyID     string
    GenContext, UnsealContext Context

    ShouldFail bool
}{
    {GenKeyID: "", UnsealKeyID: "", GenContext: Context{}, UnsealContext: nil, ShouldFail: false},                                                                                     // 0
    {GenKeyID: "ac47be7f", UnsealKeyID: "ac47be7f", GenContext: Context{}, UnsealContext: Context{}, ShouldFail: false},                                                               // 1
    {GenKeyID: "ac47be7f", UnsealKeyID: "ac47be7f", GenContext: Context{"bucket": "object"}, UnsealContext: Context{"bucket": "object"}, ShouldFail: false},                           // 2
    {GenKeyID: "", UnsealKeyID: "", GenContext: Context{"bucket": path.Join("bucket", "object")}, UnsealContext: Context{"bucket": path.Join("bucket", "object")}, ShouldFail: false}, // 3
    {GenKeyID: "", UnsealKeyID: "", GenContext: Context{"a": "a", "0": "0", "b": "b"}, UnsealContext: Context{"b": "b", "a": "a", "0": "0"}, ShouldFail: false},                       // 4

    {GenKeyID: "ac47be7f", UnsealKeyID: "ac47be7e", GenContext: Context{}, UnsealContext: Context{}, ShouldFail: true},                                                               // 5
    {GenKeyID: "ac47be7f", UnsealKeyID: "ac47be7f", GenContext: Context{"bucket": "object"}, UnsealContext: Context{"Bucket": "object"}, ShouldFail: true},                           // 6
    {GenKeyID: "", UnsealKeyID: "", GenContext: Context{"bucket": path.Join("bucket", "Object")}, UnsealContext: Context{"bucket": path.Join("bucket", "object")}, ShouldFail: true}, // 7
    {GenKeyID: "", UnsealKeyID: "", GenContext: Context{"a": "a", "0": "1", "b": "b"}, UnsealContext: Context{"b": "b", "a": "a", "0": "0"}, ShouldFail: true},                       // 8
}

func TestMasterKeyKMS(t *testing.T) {
    kms := NewKMS([32]byte{})
    for i, test := range masterKeyKMSTests {
        key, sealedKey, err := kms.GenerateKey(test.GenKeyID, test.GenContext)
        if err != nil {
            t.Errorf("Test %d: KMS failed to generate key: %v", i, err)
        }
        unsealedKey, err := kms.UnsealKey(test.UnsealKeyID, sealedKey, test.UnsealContext)
        if err != nil && !test.ShouldFail {
            t.Errorf("Test %d: KMS failed to unseal the generated key: %v", i, err)
        }
        if err == nil && test.ShouldFail {
            t.Errorf("Test %d: KMS unsealed the generated key successfully but should have failed", i)
        }
        if !test.ShouldFail && !bytes.Equal(key[:], unsealedKey[:]) {
            t.Errorf("Test %d: The generated and unsealed key differ", i)
        }
    }
}

var contextWriteToTests = []struct {
    Context      Context
    ExpectedJSON string
}{
    {Context: Context{}, ExpectedJSON: "{}"},                                                    // 0
    {Context: Context{"a": "b"}, ExpectedJSON: `{"a":"b"}`},                                     // 1
    {Context: Context{"a": "b", "c": "d"}, ExpectedJSON: `{"a":"b","c":"d"}`},                   // 2
    {Context: Context{"c": "d", "a": "b"}, ExpectedJSON: `{"a":"b","c":"d"}`},                   // 3
    {Context: Context{"0": "1", "-": "2", ".": "#"}, ExpectedJSON: `{"-":"2",".":"#","0":"1"}`}, // 4
}

func TestContextWriteTo(t *testing.T) {
    for i, test := range contextWriteToTests {
        var jsonContext strings.Builder
        if _, err := test.Context.WriteTo(&jsonContext); err != nil {
            t.Errorf("Test %d: Failed to encode context: %v", i, err)
            continue
        }
        if s := jsonContext.String(); s != test.ExpectedJSON {
            t.Errorf("Test %d: JSON representations differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON)
        }
    }
}

cmd/crypto/metadata.go (new file, 244 lines)
@@ -0,0 +1,244 @@
// Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package crypto

import (
    "context"
    "encoding/base64"
    "fmt"

    "github.com/minio/minio/cmd/logger"
)

// IsMultiPart returns true if the object metadata indicates
// that it was uploaded using some form of server-side-encryption
// and the S3 multipart API.
func IsMultiPart(metadata map[string]string) bool {
    if _, ok := metadata[SSEMultipart]; ok {
        return true
    }
    return false
}

// RemoveSensitiveEntries removes confidential encryption
// information - e.g. the SSE-C key - from the metadata map.
// It has the same semantics as RemoveSensitiveHeaders.
func RemoveSensitiveEntries(metadata map[string]string) { // The function is tested in TestRemoveSensitiveHeaders for compatibility reasons
    delete(metadata, SSECKey)
    delete(metadata, SSECopyKey)
}

// RemoveSSEHeaders removes all crypto-specific SSE
// header entries from the metadata map.
func RemoveSSEHeaders(metadata map[string]string) {
    delete(metadata, SSEHeader)
    delete(metadata, SSECKeyMD5)
    delete(metadata, SSECAlgorithm)
}

// RemoveInternalEntries removes all crypto-specific internal
// metadata entries from the metadata map.
func RemoveInternalEntries(metadata map[string]string) {
    delete(metadata, SSEMultipart)
    delete(metadata, SSEIV)
    delete(metadata, SSESealAlgorithm)
    delete(metadata, SSECSealedKey)
    delete(metadata, S3SealedKey)
    delete(metadata, S3KMSKeyID)
    delete(metadata, S3KMSSealedKey)
}

// IsEncrypted returns true if the object metadata indicates
// that it was uploaded using some form of server-side-encryption.
//
// IsEncrypted only checks whether the metadata contains at least
// one entry indicating SSE-C or SSE-S3.
func IsEncrypted(metadata map[string]string) bool {
    if _, ok := metadata[SSEIV]; ok {
        return true
    }
    if _, ok := metadata[SSESealAlgorithm]; ok {
        return true
    }
    if IsMultiPart(metadata) {
        return true
    }
    if S3.IsEncrypted(metadata) {
        return true
    }
    if SSEC.IsEncrypted(metadata) {
        return true
    }
    return false
}

// IsEncrypted returns true if the object metadata indicates
// that the object was uploaded using SSE-S3.
func (s3) IsEncrypted(metadata map[string]string) bool {
    if _, ok := metadata[S3SealedKey]; ok {
        return true
    }
    if _, ok := metadata[S3KMSKeyID]; ok {
        return true
    }
    if _, ok := metadata[S3KMSSealedKey]; ok {
        return true
    }
    return false
}

// IsEncrypted returns true if the object metadata indicates
// that the object was uploaded using SSE-C.
func (ssec) IsEncrypted(metadata map[string]string) bool {
    if _, ok := metadata[SSECSealedKey]; ok {
        return true
    }
    return false
}

// CreateMultipartMetadata adds the multipart flag entry to metadata
// and returns the modified metadata. It allocates a new metadata map
// if metadata is nil.
func CreateMultipartMetadata(metadata map[string]string) map[string]string {
    if metadata == nil {
        metadata = map[string]string{}
    }
    metadata[SSEMultipart] = ""
    return metadata
}
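A sketch of how the multipart flag interacts with the IsEncrypted checks above (import path assumed):

package main

import (
    "fmt"

    "github.com/minio/minio/cmd/crypto"
)

func main() {
    meta := crypto.CreateMultipartMetadata(nil) // allocates the map when nil is passed
    fmt.Println(crypto.IsMultiPart(meta))       // true
    fmt.Println(crypto.IsEncrypted(meta))       // true - the multipart flag alone marks the object as encrypted
}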

// CreateMetadata encodes the keyID, the sealed kms data key and the sealed key
// into the metadata and returns the modified metadata. It allocates a new
// metadata map if metadata is nil.
func (s3) CreateMetadata(metadata map[string]string, keyID string, kmsKey []byte, sealedKey SealedKey) map[string]string {
    if sealedKey.Algorithm != SealAlgorithm {
        logger.CriticalIf(context.Background(), fmt.Errorf("The seal algorithm '%s' is invalid for SSE-S3", sealedKey.Algorithm))
    }

    if metadata == nil {
        metadata = map[string]string{}
    }
    metadata[S3KMSKeyID] = keyID
    metadata[SSESealAlgorithm] = sealedKey.Algorithm
    metadata[SSEIV] = base64.StdEncoding.EncodeToString(sealedKey.IV[:])
    metadata[S3SealedKey] = base64.StdEncoding.EncodeToString(sealedKey.Key[:])
    metadata[S3KMSSealedKey] = base64.StdEncoding.EncodeToString(kmsKey)
    return metadata
}

// ParseMetadata extracts all SSE-S3 related values from the object metadata
// and checks whether they are well-formed. It returns the KMS key-ID, the
// sealed KMS key and the sealed object key on success.
func (s3) ParseMetadata(metadata map[string]string) (keyID string, kmsKey []byte, sealedKey SealedKey, err error) {
    // Extract all required values from object metadata
    b64IV, ok := metadata[SSEIV]
    if !ok {
        return keyID, kmsKey, sealedKey, errMissingInternalIV
    }
    algorithm, ok := metadata[SSESealAlgorithm]
    if !ok {
        return keyID, kmsKey, sealedKey, errMissingInternalSealAlgorithm
    }
    b64SealedKey, ok := metadata[S3SealedKey]
    if !ok {
        return keyID, kmsKey, sealedKey, Error{"The object metadata is missing the internal sealed key for SSE-S3"}
    }
    keyID, ok = metadata[S3KMSKeyID]
    if !ok {
        return keyID, kmsKey, sealedKey, Error{"The object metadata is missing the internal KMS key-ID for SSE-S3"}
    }
    b64KMSSealedKey, ok := metadata[S3KMSSealedKey]
    if !ok {
        return keyID, kmsKey, sealedKey, Error{"The object metadata is missing the internal sealed KMS data key for SSE-S3"}
    }

    // Check whether all extracted values are well-formed
    iv, err := base64.StdEncoding.DecodeString(b64IV)
    if err != nil || len(iv) != 32 {
        return keyID, kmsKey, sealedKey, errInvalidInternalIV
    }
    if algorithm != SealAlgorithm {
        return keyID, kmsKey, sealedKey, errInvalidInternalSealAlgorithm
    }
    encryptedKey, err := base64.StdEncoding.DecodeString(b64SealedKey)
    if err != nil || len(encryptedKey) != 64 {
        return keyID, kmsKey, sealedKey, Error{"The internal sealed key for SSE-S3 is invalid"}
    }
    kmsKey, err = base64.StdEncoding.DecodeString(b64KMSSealedKey)
    if err != nil {
        return keyID, kmsKey, sealedKey, Error{"The internal sealed KMS data key for SSE-S3 is invalid"}
    }

    sealedKey.Algorithm = algorithm
    copy(sealedKey.IV[:], iv)
    copy(sealedKey.Key[:], encryptedKey)
    return keyID, kmsKey, sealedKey, nil
}
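A sketch of the SSE-S3 metadata round trip using the two functions above; the key ID and the sealed KMS key bytes are placeholders, and the import path is assumed:

package main

import (
    "fmt"

    "github.com/minio/minio/cmd/crypto"
)

func main() {
    var dataKey [32]byte // stands in for a KMS-generated plaintext data key
    objectKey := crypto.GenerateKey(dataKey, nil)
    sealedKey := objectKey.Seal(dataKey, crypto.GenerateIV(nil), "SSE-S3", "bucket", "object")

    // "my-key-id" and the sealed KMS key bytes are placeholders.
    meta := crypto.S3.CreateMetadata(nil, "my-key-id", []byte("sealed-kms-key"), sealedKey)

    keyID, kmsKey, parsed, err := crypto.S3.ParseMetadata(meta)
    fmt.Println(keyID, len(kmsKey), parsed.Algorithm, err)
}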

// CreateMetadata encodes the sealed key into the metadata and returns the modified metadata.
// It allocates a new metadata map if metadata is nil.
func (ssec) CreateMetadata(metadata map[string]string, sealedKey SealedKey) map[string]string {
    if sealedKey.Algorithm != SealAlgorithm {
        logger.CriticalIf(context.Background(), fmt.Errorf("The seal algorithm '%s' is invalid for SSE-C", sealedKey.Algorithm))
    }

    if metadata == nil {
        metadata = map[string]string{}
    }
    metadata[SSESealAlgorithm] = SealAlgorithm
    metadata[SSEIV] = base64.StdEncoding.EncodeToString(sealedKey.IV[:])
    metadata[SSECSealedKey] = base64.StdEncoding.EncodeToString(sealedKey.Key[:])
    return metadata
}

// ParseMetadata extracts all SSE-C related values from the object metadata
// and checks whether they are well-formed. It returns the sealed object key
// on success.
func (ssec) ParseMetadata(metadata map[string]string) (sealedKey SealedKey, err error) {
    // Extract all required values from object metadata
    b64IV, ok := metadata[SSEIV]
    if !ok {
        return sealedKey, errMissingInternalIV
    }
    algorithm, ok := metadata[SSESealAlgorithm]
    if !ok {
        return sealedKey, errMissingInternalSealAlgorithm
    }
    b64SealedKey, ok := metadata[SSECSealedKey]
    if !ok {
        return sealedKey, Error{"The object metadata is missing the internal sealed key for SSE-C"}
    }

    // Check whether all extracted values are well-formed
    iv, err := base64.StdEncoding.DecodeString(b64IV)
    if err != nil || len(iv) != 32 {
        return sealedKey, errInvalidInternalIV
    }
    if algorithm != SealAlgorithm && algorithm != InsecureSealAlgorithm {
        return sealedKey, errInvalidInternalSealAlgorithm
    }
    encryptedKey, err := base64.StdEncoding.DecodeString(b64SealedKey)
    if err != nil || len(encryptedKey) != 64 {
        return sealedKey, Error{"The internal sealed key for SSE-C is invalid"}
    }

    sealedKey.Algorithm = algorithm
    copy(sealedKey.IV[:], iv)
    copy(sealedKey.Key[:], encryptedKey)
    return sealedKey, nil
}

// IsETagSealed returns true if the etag seems to be encrypted.
func IsETagSealed(etag []byte) bool { return len(etag) > 16 }
|
||||
439
cmd/crypto/metadata_test.go
Normal file
439
cmd/crypto/metadata_test.go
Normal file
@@ -0,0 +1,439 @@
|
||||
// Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package crypto

import (
    "bytes"
    "encoding/base64"
    "encoding/hex"
    "testing"

    "github.com/minio/minio/cmd/logger"
)

var isMultipartTests = []struct {
    Metadata  map[string]string
    Multipart bool
}{
    {Multipart: true, Metadata: map[string]string{SSEMultipart: ""}}, // 0
    {Multipart: true, Metadata: map[string]string{"X-Minio-Internal-Encrypted-Multipart": ""}}, // 1
    {Multipart: true, Metadata: map[string]string{SSEMultipart: "some-value"}}, // 2
    {Multipart: false, Metadata: map[string]string{"": ""}}, // 3
    {Multipart: false, Metadata: map[string]string{"X-Minio-Internal-EncryptedMultipart": ""}}, // 4
}

func TestIsMultipart(t *testing.T) {
    for i, test := range isMultipartTests {
        if isMultipart := IsMultiPart(test.Metadata); isMultipart != test.Multipart {
            t.Errorf("Test %d: got '%v' - want '%v'", i, isMultipart, test.Multipart)
        }
    }
}

var isEncryptedTests = []struct {
    Metadata  map[string]string
    Encrypted bool
}{
    {Encrypted: true, Metadata: map[string]string{SSEMultipart: ""}}, // 0
    {Encrypted: true, Metadata: map[string]string{SSEIV: ""}}, // 1
    {Encrypted: true, Metadata: map[string]string{SSESealAlgorithm: ""}}, // 2
    {Encrypted: true, Metadata: map[string]string{SSECSealedKey: ""}}, // 3
    {Encrypted: true, Metadata: map[string]string{S3SealedKey: ""}}, // 4
    {Encrypted: true, Metadata: map[string]string{S3KMSKeyID: ""}}, // 5
    {Encrypted: true, Metadata: map[string]string{S3KMSSealedKey: ""}}, // 6
    {Encrypted: false, Metadata: map[string]string{"": ""}}, // 7
    {Encrypted: false, Metadata: map[string]string{"X-Minio-Internal-Server-Side-Encryption": ""}}, // 8
}

func TestIsEncrypted(t *testing.T) {
    for i, test := range isEncryptedTests {
        if isEncrypted := IsEncrypted(test.Metadata); isEncrypted != test.Encrypted {
            t.Errorf("Test %d: got '%v' - want '%v'", i, isEncrypted, test.Encrypted)
        }
    }
}

var s3IsEncryptedTests = []struct {
    Metadata  map[string]string
    Encrypted bool
}{
    {Encrypted: false, Metadata: map[string]string{SSEMultipart: ""}}, // 0
    {Encrypted: false, Metadata: map[string]string{SSEIV: ""}}, // 1
    {Encrypted: false, Metadata: map[string]string{SSESealAlgorithm: ""}}, // 2
    {Encrypted: false, Metadata: map[string]string{SSECSealedKey: ""}}, // 3
    {Encrypted: true, Metadata: map[string]string{S3SealedKey: ""}}, // 4
    {Encrypted: true, Metadata: map[string]string{S3KMSKeyID: ""}}, // 5
    {Encrypted: true, Metadata: map[string]string{S3KMSSealedKey: ""}}, // 6
    {Encrypted: false, Metadata: map[string]string{"": ""}}, // 7
    {Encrypted: false, Metadata: map[string]string{"X-Minio-Internal-Server-Side-Encryption": ""}}, // 8
}

func TestS3IsEncrypted(t *testing.T) {
    for i, test := range s3IsEncryptedTests {
        if isEncrypted := S3.IsEncrypted(test.Metadata); isEncrypted != test.Encrypted {
            t.Errorf("Test %d: got '%v' - want '%v'", i, isEncrypted, test.Encrypted)
        }
    }
}

var ssecIsEncryptedTests = []struct {
    Metadata  map[string]string
    Encrypted bool
}{
    {Encrypted: false, Metadata: map[string]string{SSEMultipart: ""}}, // 0
    {Encrypted: false, Metadata: map[string]string{SSEIV: ""}}, // 1
    {Encrypted: false, Metadata: map[string]string{SSESealAlgorithm: ""}}, // 2
    {Encrypted: true, Metadata: map[string]string{SSECSealedKey: ""}}, // 3
    {Encrypted: false, Metadata: map[string]string{S3SealedKey: ""}}, // 4
    {Encrypted: false, Metadata: map[string]string{S3KMSKeyID: ""}}, // 5
    {Encrypted: false, Metadata: map[string]string{S3KMSSealedKey: ""}}, // 6
    {Encrypted: false, Metadata: map[string]string{"": ""}}, // 7
    {Encrypted: false, Metadata: map[string]string{"X-Minio-Internal-Server-Side-Encryption": ""}}, // 8
}

func TestSSECIsEncrypted(t *testing.T) {
    for i, test := range ssecIsEncryptedTests {
        if isEncrypted := SSEC.IsEncrypted(test.Metadata); isEncrypted != test.Encrypted {
            t.Errorf("Test %d: got '%v' - want '%v'", i, isEncrypted, test.Encrypted)
        }
    }
}

var s3ParseMetadataTests = []struct {
    Metadata    map[string]string
    ExpectedErr error

    DataKey   []byte
    KeyID     string
    SealedKey SealedKey
}{
    {ExpectedErr: errMissingInternalIV, Metadata: map[string]string{}, DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{}}, // 0
    {
        ExpectedErr: errMissingInternalSealAlgorithm, Metadata: map[string]string{SSEIV: ""},
        DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
    }, // 1
    {
        ExpectedErr: Error{"The object metadata is missing the internal sealed key for SSE-S3"},
        Metadata: map[string]string{SSEIV: "", SSESealAlgorithm: ""}, DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
    }, // 2
    {
        ExpectedErr: Error{"The object metadata is missing the internal KMS key-ID for SSE-S3"},
        Metadata: map[string]string{SSEIV: "", SSESealAlgorithm: "", S3SealedKey: ""}, DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
    }, // 3
    {
        ExpectedErr: Error{"The object metadata is missing the internal sealed KMS data key for SSE-S3"},
        Metadata: map[string]string{SSEIV: "", SSESealAlgorithm: "", S3SealedKey: "", S3KMSKeyID: ""},
        DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
    }, // 4
    {
        ExpectedErr: errInvalidInternalIV,
        Metadata: map[string]string{SSEIV: "", SSESealAlgorithm: "", S3SealedKey: "", S3KMSKeyID: "", S3KMSSealedKey: ""},
        DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
    }, // 5
    {
        ExpectedErr: errInvalidInternalSealAlgorithm,
        Metadata: map[string]string{
            SSEIV: base64.StdEncoding.EncodeToString(make([]byte, 32)), SSESealAlgorithm: "", S3SealedKey: "", S3KMSKeyID: "", S3KMSSealedKey: "",
        },
        DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
    }, // 6
    {
        ExpectedErr: Error{"The internal sealed key for SSE-S3 is invalid"},
        Metadata: map[string]string{
            SSEIV: base64.StdEncoding.EncodeToString(make([]byte, 32)), SSESealAlgorithm: SealAlgorithm, S3SealedKey: "",
            S3KMSKeyID: "", S3KMSSealedKey: "",
        },
        DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{},
    }, // 7
    {
        ExpectedErr: Error{"The internal sealed KMS data key for SSE-S3 is invalid"},
        Metadata: map[string]string{
            SSEIV: base64.StdEncoding.EncodeToString(make([]byte, 32)), SSESealAlgorithm: SealAlgorithm,
            S3SealedKey: base64.StdEncoding.EncodeToString(make([]byte, 64)), S3KMSKeyID: "key-1",
            S3KMSSealedKey: ".MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ=", // invalid base64
        },
        DataKey: []byte{}, KeyID: "key-1", SealedKey: SealedKey{},
    }, // 8
    {
        ExpectedErr: nil,
        Metadata: map[string]string{
            SSEIV: base64.StdEncoding.EncodeToString(make([]byte, 32)), SSESealAlgorithm: SealAlgorithm,
            S3SealedKey: base64.StdEncoding.EncodeToString(make([]byte, 64)), S3KMSKeyID: "", S3KMSSealedKey: "",
        },
        DataKey: []byte{}, KeyID: "", SealedKey: SealedKey{Algorithm: SealAlgorithm},
    }, // 9
    {
        ExpectedErr: nil,
        Metadata: map[string]string{
            SSEIV: base64.StdEncoding.EncodeToString(append([]byte{1}, make([]byte, 31)...)), SSESealAlgorithm: SealAlgorithm,
            S3SealedKey: base64.StdEncoding.EncodeToString(append([]byte{1}, make([]byte, 63)...)), S3KMSKeyID: "key-1",
            S3KMSSealedKey: base64.StdEncoding.EncodeToString(make([]byte, 48)),
        },
        DataKey: make([]byte, 48), KeyID: "key-1", SealedKey: SealedKey{Algorithm: SealAlgorithm, Key: [64]byte{1}, IV: [32]byte{1}},
    }, // 10
}

func TestS3ParseMetadata(t *testing.T) {
    for i, test := range s3ParseMetadataTests {
        keyID, dataKey, sealedKey, err := S3.ParseMetadata(test.Metadata)
        if err != test.ExpectedErr {
            t.Errorf("Test %d: got error '%v' - want error '%v'", i, err, test.ExpectedErr)
        }
        if !bytes.Equal(dataKey, test.DataKey) {
            t.Errorf("Test %d: got data key '%v' - want data key '%v'", i, dataKey, test.DataKey)
        }
        if keyID != test.KeyID {
            t.Errorf("Test %d: got key-ID '%v' - want key-ID '%v'", i, keyID, test.KeyID)
        }
        if sealedKey.Algorithm != test.SealedKey.Algorithm {
            t.Errorf("Test %d: got sealed key algorithm '%v' - want sealed key algorithm '%v'", i, sealedKey.Algorithm, test.SealedKey.Algorithm)
        }
        if !bytes.Equal(sealedKey.Key[:], test.SealedKey.Key[:]) {
            t.Errorf("Test %d: got sealed key '%v' - want sealed key '%v'", i, sealedKey.Key, test.SealedKey.Key)
        }
        if !bytes.Equal(sealedKey.IV[:], test.SealedKey.IV[:]) {
            t.Errorf("Test %d: got sealed key IV '%v' - want sealed key IV '%v'", i, sealedKey.IV, test.SealedKey.IV)
        }
    }
}

var ssecParseMetadataTests = []struct {
    Metadata    map[string]string
    ExpectedErr error

    SealedKey SealedKey
}{
    {ExpectedErr: errMissingInternalIV, Metadata: map[string]string{}, SealedKey: SealedKey{}}, // 0
    {ExpectedErr: errMissingInternalSealAlgorithm, Metadata: map[string]string{SSEIV: ""}, SealedKey: SealedKey{}}, // 1
    {
        ExpectedErr: Error{"The object metadata is missing the internal sealed key for SSE-C"},
        Metadata: map[string]string{SSEIV: "", SSESealAlgorithm: ""}, SealedKey: SealedKey{},
    }, // 2
    {
        ExpectedErr: errInvalidInternalIV,
        Metadata: map[string]string{SSEIV: "", SSESealAlgorithm: "", SSECSealedKey: ""}, SealedKey: SealedKey{},
    }, // 3
    {
        ExpectedErr: errInvalidInternalSealAlgorithm,
        Metadata: map[string]string{
            SSEIV: base64.StdEncoding.EncodeToString(make([]byte, 32)), SSESealAlgorithm: "", SSECSealedKey: "",
        },
        SealedKey: SealedKey{},
    }, // 4
    {
        ExpectedErr: Error{"The internal sealed key for SSE-C is invalid"},
        Metadata: map[string]string{
            SSEIV: base64.StdEncoding.EncodeToString(make([]byte, 32)), SSESealAlgorithm: SealAlgorithm, SSECSealedKey: "",
        },
        SealedKey: SealedKey{},
    }, // 5
    {
        ExpectedErr: nil,
        Metadata: map[string]string{
            SSEIV: base64.StdEncoding.EncodeToString(make([]byte, 32)), SSESealAlgorithm: SealAlgorithm,
            SSECSealedKey: base64.StdEncoding.EncodeToString(make([]byte, 64)),
        },
        SealedKey: SealedKey{Algorithm: SealAlgorithm},
    }, // 6
    {
        ExpectedErr: nil,
        Metadata: map[string]string{
            SSEIV: base64.StdEncoding.EncodeToString(append([]byte{1}, make([]byte, 31)...)), SSESealAlgorithm: InsecureSealAlgorithm,
            SSECSealedKey: base64.StdEncoding.EncodeToString(append([]byte{1}, make([]byte, 63)...)),
        },
        SealedKey: SealedKey{Algorithm: InsecureSealAlgorithm, Key: [64]byte{1}, IV: [32]byte{1}},
    }, // 7
}

func TestCreateMultipartMetadata(t *testing.T) {
    metadata := CreateMultipartMetadata(nil)
    if v, ok := metadata[SSEMultipart]; !ok || v != "" {
        t.Errorf("Metadata is missing the correct value for '%s': got '%s' - want '%s'", SSEMultipart, v, "")
    }
}

func TestSSECParseMetadata(t *testing.T) {
    for i, test := range ssecParseMetadataTests {
        sealedKey, err := SSEC.ParseMetadata(test.Metadata)
        if err != test.ExpectedErr {
            t.Errorf("Test %d: got error '%v' - want error '%v'", i, err, test.ExpectedErr)
        }
        if sealedKey.Algorithm != test.SealedKey.Algorithm {
            t.Errorf("Test %d: got sealed key algorithm '%v' - want sealed key algorithm '%v'", i, sealedKey.Algorithm, test.SealedKey.Algorithm)
        }
        if !bytes.Equal(sealedKey.Key[:], test.SealedKey.Key[:]) {
            t.Errorf("Test %d: got sealed key '%v' - want sealed key '%v'", i, sealedKey.Key, test.SealedKey.Key)
        }
        if !bytes.Equal(sealedKey.IV[:], test.SealedKey.IV[:]) {
            t.Errorf("Test %d: got sealed key IV '%v' - want sealed key IV '%v'", i, sealedKey.IV, test.SealedKey.IV)
        }
    }
}

var s3CreateMetadataTests = []struct {
    KeyID         string
    SealedDataKey []byte
    SealedKey     SealedKey
}{
    {KeyID: "", SealedDataKey: make([]byte, 48), SealedKey: SealedKey{Algorithm: SealAlgorithm}},
    {KeyID: "cafebabe", SealedDataKey: make([]byte, 48), SealedKey: SealedKey{Algorithm: SealAlgorithm}},
    {KeyID: "deadbeef", SealedDataKey: make([]byte, 32), SealedKey: SealedKey{IV: [32]byte{0xf7}, Key: [64]byte{0xea}, Algorithm: SealAlgorithm}},
}

func TestS3CreateMetadata(t *testing.T) {
    defer func(disableLog bool) { logger.Disable = disableLog }(logger.Disable)
    logger.Disable = true
    for i, test := range s3CreateMetadataTests {
        metadata := S3.CreateMetadata(nil, test.KeyID, test.SealedDataKey, test.SealedKey)
        keyID, kmsKey, sealedKey, err := S3.ParseMetadata(metadata)
        if err != nil {
            t.Errorf("Test %d: failed to parse metadata: %v", i, err)
            continue
        }
        if keyID != test.KeyID {
            t.Errorf("Test %d: Key-ID mismatch: got '%s' - want '%s'", i, keyID, test.KeyID)
        }
        if !bytes.Equal(kmsKey, test.SealedDataKey) {
            t.Errorf("Test %d: sealed KMS data mismatch: got '%v' - want '%v'", i, kmsKey, test.SealedDataKey)
        }
        if sealedKey.Algorithm != test.SealedKey.Algorithm {
            t.Errorf("Test %d: seal algorithm mismatch: got '%s' - want '%s'", i, sealedKey.Algorithm, test.SealedKey.Algorithm)
        }
        if !bytes.Equal(sealedKey.IV[:], test.SealedKey.IV[:]) {
            t.Errorf("Test %d: IV mismatch: got '%v' - want '%v'", i, sealedKey.IV, test.SealedKey.IV)
        }
        if !bytes.Equal(sealedKey.Key[:], test.SealedKey.Key[:]) {
            t.Errorf("Test %d: sealed key mismatch: got '%v' - want '%v'", i, sealedKey.Key, test.SealedKey.Key)
        }
    }

    defer func() {
        if err := recover(); err == nil || err != logger.ErrCritical {
            t.Errorf("Expected '%s' panic for invalid seal algorithm but got '%s'", logger.ErrCritical, err)
        }
    }()
    _ = S3.CreateMetadata(nil, "", []byte{}, SealedKey{Algorithm: InsecureSealAlgorithm})
}

var ssecCreateMetadataTests = []struct {
    KeyID         string
    SealedDataKey []byte
    SealedKey     SealedKey
}{
    {KeyID: "", SealedDataKey: make([]byte, 48), SealedKey: SealedKey{Algorithm: SealAlgorithm}},
    {KeyID: "cafebabe", SealedDataKey: make([]byte, 48), SealedKey: SealedKey{Algorithm: SealAlgorithm}},
    {KeyID: "deadbeef", SealedDataKey: make([]byte, 32), SealedKey: SealedKey{IV: [32]byte{0xf7}, Key: [64]byte{0xea}, Algorithm: SealAlgorithm}},
}

func TestSSECCreateMetadata(t *testing.T) {
    defer func(disableLog bool) { logger.Disable = disableLog }(logger.Disable)
    logger.Disable = true
    for i, test := range ssecCreateMetadataTests {
        metadata := SSEC.CreateMetadata(nil, test.SealedKey)
        sealedKey, err := SSEC.ParseMetadata(metadata)
        if err != nil {
            t.Errorf("Test %d: failed to parse metadata: %v", i, err)
            continue
        }
        if sealedKey.Algorithm != test.SealedKey.Algorithm {
            t.Errorf("Test %d: seal algorithm mismatch: got '%s' - want '%s'", i, sealedKey.Algorithm, test.SealedKey.Algorithm)
        }
        if !bytes.Equal(sealedKey.IV[:], test.SealedKey.IV[:]) {
            t.Errorf("Test %d: IV mismatch: got '%v' - want '%v'", i, sealedKey.IV, test.SealedKey.IV)
        }
        if !bytes.Equal(sealedKey.Key[:], test.SealedKey.Key[:]) {
            t.Errorf("Test %d: sealed key mismatch: got '%v' - want '%v'", i, sealedKey.Key, test.SealedKey.Key)
        }
    }

    defer func() {
        if err := recover(); err == nil || err != logger.ErrCritical {
            t.Errorf("Expected '%s' panic for invalid seal algorithm but got '%s'", logger.ErrCritical, err)
        }
    }()
    _ = SSEC.CreateMetadata(nil, SealedKey{Algorithm: InsecureSealAlgorithm})
}

var isETagSealedTests = []struct {
    ETag     string
    IsSealed bool
}{
    {ETag: "", IsSealed: false}, // 0
    {ETag: "90682b8e8cc7609c4671e1d64c73fc30", IsSealed: false}, // 1
    {ETag: "f201040c9dc593e39ea004dc1323699bcd", IsSealed: true}, // 2 not valid ciphertext but looks like sealed ETag
    {ETag: "20000f00fba2ee2ae4845f725964eeb9e092edfabc7ab9f9239e8344341f769a51ce99b4801b0699b92b16a72fa94972", IsSealed: true}, // 3
}

func TestIsETagSealed(t *testing.T) {
    for i, test := range isETagSealedTests {
        etag, err := hex.DecodeString(test.ETag)
        if err != nil {
            t.Errorf("Test %d: failed to decode etag: %s", i, err)
        }
        if sealed := IsETagSealed(etag); sealed != test.IsSealed {
            t.Errorf("Test %d: got %v - want %v", i, sealed, test.IsSealed)
        }
    }
}

var removeInternalEntriesTests = []struct {
    Metadata, Expected map[string]string
}{
    { // 0
        Metadata: map[string]string{
            SSEMultipart:     "",
            SSEIV:            "",
            SSESealAlgorithm: "",
            SSECSealedKey:    "",
            S3SealedKey:      "",
            S3KMSKeyID:       "",
            S3KMSSealedKey:   "",
        },
        Expected: map[string]string{},
    },
    { // 1
        Metadata: map[string]string{
            SSEMultipart:         "",
            SSEIV:                "",
            "X-Amz-Meta-A":       "X",
            "X-Minio-Internal-B": "Y",
        },
        Expected: map[string]string{
            "X-Amz-Meta-A":       "X",
            "X-Minio-Internal-B": "Y",
        },
    },
}

func TestRemoveInternalEntries(t *testing.T) {
    isEqual := func(x, y map[string]string) bool {
        if len(x) != len(y) {
            return false
        }
        for k, v := range x {
            if u, ok := y[k]; !ok || v != u {
                return false
            }
        }
        return true
    }

    for i, test := range removeInternalEntriesTests {
        RemoveInternalEntries(test.Metadata)
        if !isEqual(test.Metadata, test.Expected) {
            t.Errorf("Test %d: got %v - want %v", i, test.Metadata, test.Expected)
        }
    }
}
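
For reference, a minimal usage sketch (not part of this changeset) of the seal/parse round-trip that TestS3CreateMetadata exercises above; the import path follows this repository's layout and all values are illustrative:

package main

import (
    "fmt"

    "github.com/minio/minio/cmd/crypto"
)

func main() {
    // Seal a (dummy) object key and KMS data key into object metadata for SSE-S3 ...
    sealedKey := crypto.SealedKey{Algorithm: crypto.SealAlgorithm}
    metadata := crypto.S3.CreateMetadata(nil, "key-1", make([]byte, 48), sealedKey)

    // ... and parse the same entries back out.
    keyID, kmsKey, parsed, err := crypto.S3.ParseMetadata(metadata)
    if err != nil {
        panic(err)
    }
    fmt.Println(keyID, len(kmsKey), parsed.Algorithm) // key-1 48 DAREv2-HMAC-SHA256
}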
@@ -18,6 +18,8 @@ import (
    "context"
    "errors"
    "io"
    "net/http"
    "path"

    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/ioutil"
@@ -25,16 +27,103 @@ import (
)

const (
    // SSEMultipart is the metadata key indicating that the object
    // was uploaded using the S3 multipart API and stored using
    // some form of server-side encryption.
    SSEMultipart = "X-Minio-Internal-Encrypted-Multipart"

    // SSEIV is the metadata key referencing the random initialization
    // vector (IV) used for SSE-S3 and SSE-C key derivation.
    SSEIV = "X-Minio-Internal-Server-Side-Encryption-Iv"

    // SSESealAlgorithm is the metadata key referencing the algorithm
    // used by SSE-C and SSE-S3 to encrypt the object.
    SSESealAlgorithm = "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm"

    // SSECSealedKey is the metadata key referencing the sealed object-key for SSE-C.
    SSECSealedKey = "X-Minio-Internal-Server-Side-Encryption-Sealed-Key"

    // S3SealedKey is the metadata key referencing the sealed object-key for SSE-S3.
    S3SealedKey = "X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key"

    // S3KMSKeyID is the metadata key referencing the KMS key-id used to
    // generate/decrypt the S3-KMS-Sealed-Key. It is only used for SSE-S3 + KMS.
    S3KMSKeyID = "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id"

    // S3KMSSealedKey is the metadata key referencing the encrypted key generated
    // by KMS. It is only used for SSE-S3 + KMS.
    S3KMSSealedKey = "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key"
)

const (
    // SealAlgorithm is the encryption/sealing algorithm used to derive & seal
    // the key-encryption-key and to en/decrypt the object data.
    SealAlgorithm = "DAREv2-HMAC-SHA256"

    // InsecureSealAlgorithm is the legacy encryption/sealing algorithm used
    // to derive & seal the key-encryption-key and to en/decrypt the object data.
    // This algorithm should not be used for new objects because its key derivation
    // is not optimal. See: https://github.com/minio/minio/pull/6121
    InsecureSealAlgorithm = "DARE-SHA256"
)

// String returns the SSE domain as string. For SSE-S3 the
// domain is "SSE-S3".
func (s3) String() string { return "SSE-S3" }

// UnsealObjectKey extracts and decrypts the sealed object key
// from the metadata using KMS and returns the decrypted object
// key.
func (sse s3) UnsealObjectKey(kms KMS, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
    keyID, kmsKey, sealedKey, err := sse.ParseMetadata(metadata)
    if err != nil {
        return
    }
    unsealKey, err := kms.UnsealKey(keyID, kmsKey, Context{bucket: path.Join(bucket, object)})
    if err != nil {
        return
    }
    err = key.Unseal(unsealKey, sealedKey, sse.String(), bucket, object)
    return
}

// String returns the SSE domain as string. For SSE-C the
// domain is "SSE-C".
func (ssec) String() string { return "SSE-C" }

// UnsealObjectKey extracts and decrypts the sealed object key
// from the metadata using the SSE-C client key of the HTTP headers
// and returns the decrypted object key.
func (sse ssec) UnsealObjectKey(h http.Header, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
    clientKey, err := sse.ParseHTTP(h)
    if err != nil {
        return
    }
    return unsealObjectKey(clientKey, metadata, bucket, object)
}

// UnsealObjectKey extracts and decrypts the sealed object key
// from the metadata using the SSE-Copy client key of the HTTP headers
// and returns the decrypted object key.
func (sse ssecCopy) UnsealObjectKey(h http.Header, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
    clientKey, err := sse.ParseHTTP(h)
    if err != nil {
        return
    }
    return unsealObjectKey(clientKey, metadata, bucket, object)
}

// unsealObjectKey decrypts and returns the sealed object key
// from the metadata using the SSE-C client key.
func unsealObjectKey(clientKey [32]byte, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
    sealedKey, err := SSEC.ParseMetadata(metadata)
    if err != nil {
        return
    }
    err = key.Unseal(clientKey, sealedKey, SSEC.String(), bucket, object)
    return
}

// EncryptSinglePart encrypts an io.Reader which must be the
// body of a single-part PUT request.
func EncryptSinglePart(r io.Reader, key ObjectKey) io.Reader {
@@ -45,6 +134,14 @@ func EncryptSinglePart(r io.Reader, key ObjectKey) io.Reader {
    return r
}

// EncryptMultiPart encrypts an io.Reader which must be the body of
// a multi-part PUT request. It derives a unique encryption key from
// the partID and the object key.
func EncryptMultiPart(r io.Reader, partID int, key ObjectKey) io.Reader {
    partKey := key.DerivePartKey(uint32(partID))
    return EncryptSinglePart(r, ObjectKey(partKey))
}

// DecryptSinglePart decrypts an io.Writer which must be an object
// uploaded with the single-part PUT API. The offset and length
// specify the requested range.
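
EncryptMultiPart above derives one key per part via DerivePartKey. A hedged caller-side sketch of what that enables (the part readers and the unsealed object key are assumed to come from the surrounding request handling; nothing here is part of the diff):

package main

import (
    "bytes"
    "io"
    "io/ioutil"

    "github.com/minio/minio/cmd/crypto"
)

func main() {
    var objectKey crypto.ObjectKey // would be unsealed from object metadata in practice
    parts := []io.Reader{
        bytes.NewReader([]byte("part-1")),
        bytes.NewReader([]byte("part-2")),
    }
    for i, part := range parts {
        // S3 part IDs start at 1; each part is encrypted under its own derived key,
        // so parts can be uploaded and decrypted independently.
        encrypted := crypto.EncryptMultiPart(part, i+1, objectKey)
        io.Copy(ioutil.Discard, encrypted) // in practice: stream to the backend
    }
}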
226 cmd/crypto/sse_test.go Normal file
@@ -0,0 +1,226 @@
// Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package crypto

import (
    "net/http"
    "testing"
)

func TestS3String(t *testing.T) {
    const Domain = "SSE-S3"
    if domain := S3.String(); domain != Domain {
        t.Errorf("S3's string method returns wrong domain: got '%s' - want '%s'", domain, Domain)
    }
}

func TestSSECString(t *testing.T) {
    const Domain = "SSE-C"
    if domain := SSEC.String(); domain != Domain {
        t.Errorf("SSEC's string method returns wrong domain: got '%s' - want '%s'", domain, Domain)
    }
}

var ssecUnsealObjectKeyTests = []struct {
    Headers        http.Header
    Bucket, Object string
    Metadata       map[string]string

    ExpectedErr error
}{
    { // 0 - Valid HTTP headers and valid metadata entries for bucket/object
        Headers: http.Header{
            "X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        Bucket: "bucket",
        Object: "object",
        Metadata: map[string]string{
            "X-Minio-Internal-Server-Side-Encryption-Sealed-Key":     "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
            "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
            "X-Minio-Internal-Server-Side-Encryption-Iv":             "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
        },
        ExpectedErr: nil,
    },
    { // 1 - Valid HTTP headers but invalid metadata entries for bucket/object2
        Headers: http.Header{
            "X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        Bucket: "bucket",
        Object: "object2",
        Metadata: map[string]string{
            "X-Minio-Internal-Server-Side-Encryption-Sealed-Key":     "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
            "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
            "X-Minio-Internal-Server-Side-Encryption-Iv":             "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
        },
        ExpectedErr: ErrSecretKeyMismatch,
    },
    { // 2 - Valid HTTP headers but invalid metadata entries for bucket/object
        Headers: http.Header{
            "X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        Bucket: "bucket",
        Object: "object",
        Metadata: map[string]string{
            "X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
            "X-Minio-Internal-Server-Side-Encryption-Iv":         "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
        },
        ExpectedErr: errMissingInternalSealAlgorithm,
    },
    { // 3 - Invalid HTTP headers for valid metadata entries for bucket/object
        Headers: http.Header{
            "X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
        },
        Bucket: "bucket",
        Object: "object",
        Metadata: map[string]string{
            "X-Minio-Internal-Server-Side-Encryption-Sealed-Key":     "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
            "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
            "X-Minio-Internal-Server-Side-Encryption-Iv":             "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
        },
        ExpectedErr: ErrMissingCustomerKeyMD5,
    },
}

func TestSSECUnsealObjectKey(t *testing.T) {
    for i, test := range ssecUnsealObjectKeyTests {
        if _, err := SSEC.UnsealObjectKey(test.Headers, test.Metadata, test.Bucket, test.Object); err != test.ExpectedErr {
            t.Errorf("Test %d: got: %v - want: %v", i, err, test.ExpectedErr)
        }
    }
}

var sseCopyUnsealObjectKeyTests = []struct {
    Headers        http.Header
    Bucket, Object string
    Metadata       map[string]string

    ExpectedErr error
}{
    { // 0 - Valid HTTP headers and valid metadata entries for bucket/object
        Headers: http.Header{
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        Bucket: "bucket",
        Object: "object",
        Metadata: map[string]string{
            "X-Minio-Internal-Server-Side-Encryption-Sealed-Key":     "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
            "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
            "X-Minio-Internal-Server-Side-Encryption-Iv":             "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
        },
        ExpectedErr: nil,
    },
    { // 1 - Valid HTTP headers but invalid metadata entries for bucket/object2
        Headers: http.Header{
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        Bucket: "bucket",
        Object: "object2",
        Metadata: map[string]string{
            "X-Minio-Internal-Server-Side-Encryption-Sealed-Key":     "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
            "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
            "X-Minio-Internal-Server-Side-Encryption-Iv":             "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
        },
        ExpectedErr: ErrSecretKeyMismatch,
    },
    { // 2 - Valid HTTP headers but invalid metadata entries for bucket/object
        Headers: http.Header{
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
        },
        Bucket: "bucket",
        Object: "object",
        Metadata: map[string]string{
            "X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
            "X-Minio-Internal-Server-Side-Encryption-Iv":         "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
        },
        ExpectedErr: errMissingInternalSealAlgorithm,
    },
    { // 3 - Invalid HTTP headers for valid metadata entries for bucket/object
        Headers: http.Header{
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
        },
        Bucket: "bucket",
        Object: "object",
        Metadata: map[string]string{
            "X-Minio-Internal-Server-Side-Encryption-Sealed-Key":     "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
            "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
            "X-Minio-Internal-Server-Side-Encryption-Iv":             "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
        },
        ExpectedErr: ErrMissingCustomerKeyMD5,
    },
}

func TestSSECopyUnsealObjectKey(t *testing.T) {
    for i, test := range sseCopyUnsealObjectKeyTests {
        if _, err := SSECopy.UnsealObjectKey(test.Headers, test.Metadata, test.Bucket, test.Object); err != test.ExpectedErr {
            t.Errorf("Test %d: got: %v - want: %v", i, err, test.ExpectedErr)
        }
    }
}

var s3UnsealObjectKeyTests = []struct {
    KMS            KMS
    Bucket, Object string
    Metadata       map[string]string

    ExpectedErr error
}{
    { // 0 - Valid KMS key-ID and valid metadata entries for bucket/object
        KMS:    NewKMS([32]byte{}),
        Bucket: "bucket",
        Object: "object",
        Metadata: map[string]string{
            "X-Minio-Internal-Server-Side-Encryption-Iv":                "hhVY0LKR1YtZbzAKxTWUfZt5enDfYX6Fxz1ma8Kiudc=",
            "X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key":     "IAAfALhsOeD5AE3s5Zgq3DZ5VFGsOa3B0ksVC86veDcaj+fXv2U0VadhPaOKYr9Emd5ssOsO0uIhIIrKiOy9rA==",
            "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key": "IAAfAMRS2iw45FsfiF3QXajSYVWj1lxMpQm6DxDGPtADCX6fJQQ4atHBtfpgqJFyeQmIHsm0FBI+UlHw1Lv4ug==",
            "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id":     "test-key-1",
            "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm":    "DAREv2-HMAC-SHA256",
        },
        ExpectedErr: nil,
    },
    { // 1 - Valid KMS key-ID for invalid metadata entries for bucket/object
        KMS:    NewKMS([32]byte{}),
        Bucket: "bucket",
        Object: "object",
        Metadata: map[string]string{
            "X-Minio-Internal-Server-Side-Encryption-Iv":                "hhVY0LKR1YtZbzAKxTWUfZt5enDfYX6Fxz1ma8Kiudc=",
            "X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key":     "IAAfALhsOeD5AE3s5Zgq3DZ5VFGsOa3B0ksVC86veDcaj+fXv2U0VadhPaOKYr9Emd5ssOsO0uIhIIrKiOy9rA==",
            "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key": "IAAfAMRS2iw45FsfiF3QXajSYVWj1lxMpQm6DxDGPtADCX6fJQQ4atHBtfpgqJFyeQmIHsm0FBI+UlHw1Lv4ug==",
            "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id":     "test-key-1",
        },
        ExpectedErr: errMissingInternalSealAlgorithm,
    },
}

func TestS3UnsealObjectKey(t *testing.T) {
    for i, test := range s3UnsealObjectKeyTests {
        if _, err := S3.UnsealObjectKey(test.KMS, test.Metadata, test.Bucket, test.Object); err != test.ExpectedErr {
            t.Errorf("Test %d: got: %v - want: %v", i, err, test.ExpectedErr)
        }
    }
}
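
A hedged sketch of the request-side flow these tests cover, reusing the header and metadata values from test case 0 above (the snippet itself is not part of the diff):

package main

import (
    "fmt"
    "net/http"

    "github.com/minio/minio/cmd/crypto"
)

func main() {
    // Client-provided SSE-C headers, as they arrive on a GET/HEAD request.
    h := http.Header{
        "X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
        "X-Amz-Server-Side-Encryption-Customer-Key":       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
        "X-Amz-Server-Side-Encryption-Customer-Key-Md5":   []string{"7PpPLAK26ONlVUGOWlusfg=="},
    }
    // Internal metadata stored alongside the object.
    metadata := map[string]string{
        "X-Minio-Internal-Server-Side-Encryption-Sealed-Key":     "IAAfAMBdYor5tf/UlVaQvwYlw5yKbPBeQqfygqsfHqhu1wHD9KDAP4bw38AhL12prFTS23JbbR9Re5Qv26ZnlQ==",
        "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256",
        "X-Minio-Internal-Server-Side-Encryption-Iv":             "coVfGS3I/CTrqexX5vUN+PQPoP9aUFiPYYrSzqTWfBA=",
    }
    key, err := crypto.SSEC.UnsealObjectKey(h, metadata, "bucket", "object")
    if err != nil {
        panic(err)
    }
    fmt.Printf("unsealed %d-byte object key\n", len(key))
}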
249 cmd/crypto/vault.go Normal file
@@ -0,0 +1,249 @@
// Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package crypto

import (
    "bytes"
    "encoding/base64"
    "errors"
    "fmt"
    "strings"
    "time"

    vault "github.com/hashicorp/vault/api"
    "github.com/minio/minio/cmd/logger"
)

var (
    // ErrKMSAuthLogin is raised when there is a failure authenticating to KMS
    ErrKMSAuthLogin = errors.New("Vault service did not return auth info")
)

// VaultKey represents vault encryption key-ring.
type VaultKey struct {
    Name    string `json:"name"`    // The name of the encryption key-ring
    Version int    `json:"version"` // The key version
}

// VaultAuth represents vault authentication type.
// Currently the only supported authentication type is AppRole.
type VaultAuth struct {
    Type    string       `json:"type"`    // The authentication type
    AppRole VaultAppRole `json:"approle"` // The AppRole authentication credentials
}

// VaultAppRole represents vault AppRole authentication credentials
type VaultAppRole struct {
    ID     string `json:"id"`     // The AppRole access ID
    Secret string `json:"secret"` // The AppRole secret
}

// VaultConfig represents vault configuration.
type VaultConfig struct {
    Endpoint  string    `json:"endpoint"` // The vault API endpoint as URL
    CAPath    string    `json:"-"`        // The path to PEM-encoded certificate files used for mTLS. Currently not used in config file.
    Auth      VaultAuth `json:"auth"`     // The vault authentication configuration
    Key       VaultKey  `json:"key-id"`   // The named key used for key-generation / decryption.
    Namespace string    `json:"-"`        // The vault namespace of enterprise vault instances
}

// vaultService represents a connection to a vault KMS.
type vaultService struct {
    config        *VaultConfig
    client        *vault.Client
    leaseDuration time.Duration
    tokenRenewer  *vault.Renewer
}

var _ KMS = (*vaultService)(nil) // compiler check that *vaultService implements KMS

// empty/default vault configuration used to check whether a particular configuration is empty.
var emptyVaultConfig = VaultConfig{}

// IsEmpty returns true if the vault config struct is an
// empty configuration.
func (v *VaultConfig) IsEmpty() bool { return *v == emptyVaultConfig }

// Verify returns a nil error if the vault configuration
// is valid. A valid configuration is either empty or
// contains valid non-default values.
func (v *VaultConfig) Verify() (err error) {
    if v.IsEmpty() {
        return // an empty configuration is valid
    }
    switch {
    case v.Endpoint == "":
        err = errors.New("crypto: missing hashicorp vault endpoint")
    case strings.ToLower(v.Auth.Type) != "approle":
        err = fmt.Errorf("crypto: invalid hashicorp vault authentication type: %s is not supported", v.Auth.Type)
    case v.Auth.AppRole.ID == "":
        err = errors.New("crypto: missing hashicorp vault AppRole ID")
    case v.Auth.AppRole.Secret == "":
        err = errors.New("crypto: missing hashicorp vault AppRole secret ID")
    case v.Key.Name == "":
        err = errors.New("crypto: missing hashicorp vault key name")
    case v.Key.Version < 0:
        err = errors.New("crypto: invalid hashicorp vault key version: The key version must not be negative")
    }
    return
}

// NewVault initializes Hashicorp Vault KMS by authenticating
// to Vault with the credentials in config and gets a client
// token for future API calls.
func NewVault(config VaultConfig) (KMS, error) {
    if config.IsEmpty() {
        return nil, errors.New("crypto: the hashicorp vault configuration must not be empty")
    }
    if err := config.Verify(); err != nil {
        return nil, err
    }

    vaultCfg := vault.Config{Address: config.Endpoint}
    if err := vaultCfg.ConfigureTLS(&vault.TLSConfig{CAPath: config.CAPath}); err != nil {
        return nil, err
    }
    client, err := vault.NewClient(&vaultCfg)
    if err != nil {
        return nil, err
    }
    if config.Namespace != "" {
        client.SetNamespace(config.Namespace)
    }
    v := &vaultService{client: client, config: &config}

    if err := v.authenticate(); err != nil {
        return nil, err
    }
    return v, nil
}

// reauthenticate() tries to log in at 1-minute
// intervals until successful.
func (v *vaultService) reauthenticate() {
    retryDelay := 1 * time.Minute
    go func() {
        for {
            if err := v.authenticate(); err != nil {
                time.Sleep(retryDelay)
                continue
            }
            return
        }
    }()
}

// renewer calls the vault client's renewer that automatically
// renews the secret periodically
func (v *vaultService) renewer(secret *vault.Secret) {
    renewer, err := v.client.NewRenewer(&vault.RenewerInput{
        Secret: secret,
    })
    if err != nil {
        logger.FatalIf(err, "crypto: hashicorp vault token renewer could not be started")
    }
    v.tokenRenewer = renewer
    go renewer.Renew()
    defer renewer.Stop()

    for {
        select {
        case err := <-renewer.DoneCh():
            if err != nil {
                v.reauthenticate()
                renewer.Stop()
                return
            }

        // Renewal is now over
        case renewal := <-renewer.RenewCh():
            v.leaseDuration = time.Duration(renewal.Secret.Auth.LeaseDuration)
        }
    }
}

// authenticate logs the app in to vault and starts the auto-renewer
// before the secret expires
func (v *vaultService) authenticate() (err error) {
    payload := map[string]interface{}{
        "role_id":   v.config.Auth.AppRole.ID,
        "secret_id": v.config.Auth.AppRole.Secret,
    }
    var secret *vault.Secret
    secret, err = v.client.Logical().Write("auth/approle/login", payload)
    if err != nil {
        return
    }
    if secret.Auth == nil {
        err = ErrKMSAuthLogin
        return
    }
    v.client.SetToken(secret.Auth.ClientToken)
    v.leaseDuration = time.Duration(secret.Auth.LeaseDuration)
    go v.renewer(secret)
    return
}

// GenerateKey returns a new plaintext key, generated by the KMS,
// and a sealed version of this plaintext key encrypted using the
// named key referenced by keyID. It also binds the generated key
// cryptographically to the provided context.
func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
    var contextStream bytes.Buffer
    ctx.WriteTo(&contextStream)

    payload := map[string]interface{}{
        "context": base64.StdEncoding.EncodeToString(contextStream.Bytes()),
    }
    s, err := v.client.Logical().Write(fmt.Sprintf("/transit/datakey/plaintext/%s", keyID), payload)
    if err != nil {
        return key, sealedKey, err
    }
    sealKey := s.Data["ciphertext"].(string)
    plainKey, err := base64.StdEncoding.DecodeString(s.Data["plaintext"].(string))
    if err != nil {
        return key, sealedKey, err
    }
    copy(key[:], []byte(plainKey))
    return key, []byte(sealKey), nil
}

// UnsealKey returns the decrypted sealedKey as plaintext key.
// Therefore it sends the sealedKey to the KMS which decrypts
// it using the named key referenced by keyID and responds with
// the plaintext key.
//
// The context must be the same context as the one provided while
// generating the plaintext key / sealedKey.
func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
    var contextStream bytes.Buffer
    ctx.WriteTo(&contextStream)

    payload := map[string]interface{}{
        "ciphertext": string(sealedKey),
        "context":    base64.StdEncoding.EncodeToString(contextStream.Bytes()),
    }
    s, err := v.client.Logical().Write(fmt.Sprintf("/transit/decrypt/%s", keyID), payload)
    if err != nil {
        return key, err
    }
    base64Key := s.Data["plaintext"].(string)
    plainKey, err := base64.StdEncoding.DecodeString(base64Key)
    if err != nil {
        return key, err
    }
    copy(key[:], []byte(plainKey))
    return key, nil
}
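
A hedged end-to-end sketch of the KMS interface this file implements, under the assumption that a Vault transit key exists; every value below is a placeholder for a real deployment:

package main

import (
    "fmt"

    "github.com/minio/minio/cmd/crypto"
)

func main() {
    kms, err := crypto.NewVault(crypto.VaultConfig{
        Endpoint: "https://vault.example.net:8200", // placeholder
        Auth: crypto.VaultAuth{
            Type:    "approle",
            AppRole: crypto.VaultAppRole{ID: "role-id", Secret: "secret-id"}, // placeholders
        },
        Key: crypto.VaultKey{Name: "minio-sse-key", Version: 1}, // placeholder
    })
    if err != nil {
        panic(err)
    }

    // Bind the data key to the object path, as s3.UnsealObjectKey does above.
    ctx := crypto.Context{"bucket": "bucket/object"}
    key, sealedKey, err := kms.GenerateKey("minio-sse-key", ctx)
    if err != nil {
        panic(err)
    }

    // Later: ask Vault to unseal; the same context must be provided.
    unsealed, err := kms.UnsealKey("minio-sse-key", sealedKey, ctx)
    if err != nil {
        panic(err)
    }
    fmt.Println(key == unsealed) // true
}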
@@ -18,7 +18,11 @@ package cmd

import (
    "encoding/json"
    "errors"
    "path/filepath"
    "strings"

    "github.com/minio/minio/pkg/ellipses"
)

// CacheConfig represents cache config settings
@@ -41,6 +45,15 @@ func (cfg *CacheConfig) UnmarshalJSON(data []byte) (err error) {
    if err = json.Unmarshal(data, _cfg); err != nil {
        return err
    }

    if _cfg.Expiry < 0 {
        return errors.New("config expiry value should not be negative")
    }

    if _cfg.MaxUse < 0 {
        return errors.New("config max use value should not be null or negative")
    }

    if _, err = parseCacheDrives(_cfg.Drives); err != nil {
        return err
    }
@@ -52,12 +65,42 @@ func (cfg *CacheConfig) UnmarshalJSON(data []byte) (err error) {

// Parses given cacheDrivesEnv and returns a list of cache drives.
func parseCacheDrives(drives []string) ([]string, error) {
    if len(drives) == 0 {
        return drives, nil
    }
    var endpoints []string
    for _, d := range drives {
        if ellipses.HasEllipses(d) {
            s, err := parseCacheDrivePaths(d)
            if err != nil {
                return nil, err
            }
            endpoints = append(endpoints, s...)
        } else {
            endpoints = append(endpoints, d)
        }
    }

    for _, d := range endpoints {
        if !filepath.IsAbs(d) {
            return nil, uiErrInvalidCacheDrivesValue(nil).Msg("cache dir should be absolute path: %s", d)
        }
    }
-   return drives, nil
+   return endpoints, nil
}

// Parses all arguments and returns a slice of drive paths following the ellipses pattern.
func parseCacheDrivePaths(arg string) (ep []string, err error) {
    patterns, perr := ellipses.FindEllipsesPatterns(arg)
    if perr != nil {
        return []string{}, uiErrInvalidCacheDrivesValue(nil).Msg(perr.Error())
    }

    for _, lbls := range patterns.Expand() {
        ep = append(ep, strings.Join(lbls, ""))
    }

    return ep, nil
}
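
For reference, a minimal sketch of the ellipses expansion that parseCacheDrivePaths relies on, using only the pkg/ellipses calls already shown in the diff (the snippet itself is illustrative):

package main

import (
    "fmt"
    "strings"

    "github.com/minio/minio/pkg/ellipses"
)

func main() {
    arg := "/home/drive{1...3}"
    if ellipses.HasEllipses(arg) {
        patterns, err := ellipses.FindEllipsesPatterns(arg)
        if err != nil {
            panic(err)
        }
        // Prints /home/drive1, /home/drive2, /home/drive3 on separate lines.
        for _, lbls := range patterns.Expand() {
            fmt.Println(strings.Join(lbls, ""))
        }
    }
}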

// Parses given cacheExcludesEnv and returns a list of cache exclude patterns.
@@ -41,12 +41,32 @@ func TestParseCacheDrives(t *testing.T) {
        expectedPatterns []string
        success          bool
    }{"C:/home/drive1;C:/home/drive2;C:/home/drive3", []string{"C:/home/drive1", "C:/home/drive2", "C:/home/drive3"}, true})
    testCases = append(testCases, struct {
        driveStr         string
        expectedPatterns []string
        success          bool
    }{"C:/home/drive{1...3}", []string{"C:/home/drive1", "C:/home/drive2", "C:/home/drive3"}, true})
    testCases = append(testCases, struct {
        driveStr         string
        expectedPatterns []string
        success          bool
    }{"C:/home/drive{1..3}", []string{}, false})
} else {
    testCases = append(testCases, struct {
        driveStr         string
        expectedPatterns []string
        success          bool
    }{"/home/drive1;/home/drive2;/home/drive3", []string{"/home/drive1", "/home/drive2", "/home/drive3"}, true})
    testCases = append(testCases, struct {
        driveStr         string
        expectedPatterns []string
        success          bool
    }{"/home/drive{1...3}", []string{"/home/drive1", "/home/drive2", "/home/drive3"}, true})
    testCases = append(testCases, struct {
        driveStr         string
        expectedPatterns []string
        success          bool
    }{"/home/drive{1..3}", []string{}, false})
}
for i, testCase := range testCases {
    drives, err := parseCacheDrives(strings.Split(testCase.driveStr, cacheEnvDelimiter))
@@ -30,7 +30,6 @@ import (

    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/disk"
    "github.com/minio/minio/pkg/hash"
    "github.com/minio/minio/pkg/lock"
)

@@ -92,7 +91,7 @@ func newCacheFSObjects(dir string, expiry int, maxDiskUsagePct int) (*cacheFSObj
        appendFileMap: make(map[string]*fsAppendFile),
    }

-   go fsObjects.cleanupStaleMultipartUploads(context.Background(), globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
+   go fsObjects.cleanupStaleMultipartUploads(context.Background(), GlobalMultipartCleanupInterval, GlobalMultipartExpiry, GlobalServiceDoneCh)

    cacheFS := cacheFSObjects{
        FSObjects: fsObjects,
@@ -159,7 +158,7 @@ func (cfs *cacheFSObjects) purgeTrash() {

    for {
        select {
-       case <-globalServiceDoneCh:
+       case <-GlobalServiceDoneCh:
            return
        case <-ticker.C:
            trashPath := path.Join(cfs.fsPath, minioMetaBucket, cacheTrashDir)
@@ -258,7 +257,7 @@ func (cfs *cacheFSObjects) IsOnline() bool {
}

// Caches the object to disk
-func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) error {
+func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) error {
    if cfs.diskUsageHigh() {
        select {
        case cfs.purgeChan <- struct{}{}:
@@ -275,7 +274,7 @@ func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data
            return pErr
        }
    }
-   _, err := cfs.PutObject(ctx, bucket, object, data, metadata)
+   _, err := cfs.PutObject(ctx, bucket, object, data, metadata, opts)
    // if err is due to disk being offline, mark cache drive as offline
    if IsErr(err, baseErrs...) {
        cfs.setOnline(false)
@@ -284,8 +283,8 @@ func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data
}

// Returns the handle for the cached object
-func (cfs *cacheFSObjects) Get(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error) {
-   return cfs.GetObject(ctx, bucket, object, startOffset, length, writer, etag)
+func (cfs *cacheFSObjects) Get(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
+   return cfs.GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
}

// Deletes the cached object
@@ -295,13 +294,14 @@ func (cfs *cacheFSObjects) Delete(ctx context.Context, bucket, object string) (e

// convenience function to check if object is cached on this cacheFSObjects
func (cfs *cacheFSObjects) Exists(ctx context.Context, bucket, object string) bool {
-   _, err := cfs.GetObjectInfo(ctx, bucket, object)
+   _, err := cfs.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
    return err == nil
}

// Identical to fs PutObject operation except that it uses ETag in metadata
// headers.
-func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
+func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object string, r *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
+   data := r.Reader
    fs := cfs.FSObjects
    // Lock the object.
    objectLock := fs.nsMutex.NewNSLock(bucket, object)
@@ -354,7 +354,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
    }

    // Validate input data size and it can never be less than zero.
-   if data.Size() < 0 {
+   if data.Size() < -1 {
        logger.LogIf(ctx, errInvalidArgument)
        return ObjectInfo{}, errInvalidArgument
    }
@@ -438,7 +438,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
// Implements S3 compatible initiate multipart API. Operation here is identical
// to fs backend implementation - with the exception that cache FS uses the uploadID
// generated on the backend
-func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string, uploadID string) (string, error) {
+func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string, uploadID string, opts ObjectOptions) (string, error) {
    if cfs.diskUsageHigh() {
        select {
        case cfs.purgeChan <- struct{}{}:
@@ -31,18 +31,14 @@ import (
|
||||
|
||||
"github.com/djherbis/atime"
|
||||
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/wildcard"
|
||||
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/wildcard"
|
||||
)
|
||||
|
||||
// list of all errors that can be ignored in tree walk operation in disk cache
|
||||
var cacheTreeWalkIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errVolumeNotFound, errFileNotFound)
|
||||
|
||||
const (
|
||||
// disk cache needs to have cacheSizeMultiplier * object size space free for a cache entry to be created.
|
||||
cacheSizeMultiplier = 100
|
||||
// disk cache needs to have object size space free for a cache entry to be created.
|
||||
cacheTrashDir = "trash"
|
||||
cacheCleanupInterval = 10 // in minutes
|
||||
)
|
||||
@@ -61,18 +57,19 @@ type cacheObjects struct {
|
||||
// file path patterns to exclude from cache
|
||||
exclude []string
|
||||
// Object functions pointing to the corresponding functions of backend implementation.
|
||||
GetObjectFn func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error)
|
||||
GetObjectInfoFn func(ctx context.Context, bucket, object string) (objInfo ObjectInfo, err error)
|
||||
PutObjectFn func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error)
|
||||
GetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
|
||||
GetObjectFn func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
|
||||
GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
+	PutObjectFn func(ctx context.Context, bucket, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
 	DeleteObjectFn func(ctx context.Context, bucket, object string) error
 	ListObjectsFn func(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
 	ListObjectsV2Fn func(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error)
 	ListBucketsFn func(ctx context.Context) (buckets []BucketInfo, err error)
 	GetBucketInfoFn func(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error)
-	NewMultipartUploadFn func(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error)
-	PutObjectPartFn func(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error)
+	NewMultipartUploadFn func(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error)
+	PutObjectPartFn func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
 	AbortMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string) error
-	CompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)
+	CompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
 	DeleteBucketFn func(ctx context.Context, bucket string) error
 }

@@ -92,21 +89,27 @@ type CacheObjectLayer interface {
 	ListBuckets(ctx context.Context) (buckets []BucketInfo, err error)
 	DeleteBucket(ctx context.Context, bucket string) error
 	// Object operations.
-	GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error)
-	GetObjectInfo(ctx context.Context, bucket, object string) (objInfo ObjectInfo, err error)
-	PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error)
+	GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
+	GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
+	GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
+	PutObject(ctx context.Context, bucket, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
 	DeleteObject(ctx context.Context, bucket, object string) error

 	// Multipart operations.
-	NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error)
-	PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error)
+	NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error)
+	PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
 	AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error
-	CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)
+	CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)

 	// Storage operations.
 	StorageInfo(ctx context.Context) CacheStorageInfo
 }

+// IsCacheable returns if the object should be saved in the cache.
+func (o ObjectInfo) IsCacheable() bool {
+	return !crypto.IsEncrypted(o.UserDefined)
+}

 // backendDownError returns true if err is due to backend failure or faulty disk if in server mode
 func backendDownError(err error) bool {
 	_, backendDown := err.(BackendDown)
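The struct above keeps every backend operation in a function-typed field (PutObjectFn, DeleteObjectFn, and so on). That indirection lets the cache layer delegate to whatever ObjectLayer is current, and lets tests stub a single operation without defining a new type. A minimal, self-contained sketch of the pattern (the names here are illustrative, not part of the commit):

package main

import "fmt"

// objectLayer mirrors the cacheObjects pattern above: behavior is held in
// function-typed fields so callers and tests can swap in replacements.
type objectLayer struct {
	GetObjectFn    func(bucket, object string) (string, error)
	DeleteObjectFn func(bucket, object string) error
}

func main() {
	// Production wiring.
	ol := objectLayer{
		GetObjectFn: func(bucket, object string) (string, error) {
			return "backend:" + bucket + "/" + object, nil
		},
		DeleteObjectFn: func(bucket, object string) error { return nil },
	}

	// A test can override just one hook, leaving the rest intact.
	ol.GetObjectFn = func(bucket, object string) (string, error) {
		return "stub", nil
	}

	v, _ := ol.GetObjectFn("b", "o")
	fmt.Println(v) // stub
}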
@@ -179,22 +182,99 @@ func (c cacheObjects) getMetadata(objInfo ObjectInfo) map[string]string {
 	return metadata
 }

+func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
+	if c.isCacheExclude(bucket, object) {
+		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
+	}
+
+	// fetch cacheFSObjects if object is currently cached or nearest available cache drive
+	dcache, err := c.cache.getCachedFSLoc(ctx, bucket, object)
+	if err != nil {
+		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
+	}
+
+	cacheReader, cacheErr := dcache.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
+
+	objInfo, err := c.GetObjectInfoFn(ctx, bucket, object, opts)
+	if backendDownError(err) && cacheErr == nil {
+		return cacheReader, nil
+	} else if err != nil {
+		if _, ok := err.(ObjectNotFound); ok {
+			// Delete cached entry if backend object was deleted.
+			dcache.Delete(ctx, bucket, object)
+		}
+		return nil, err
+	}
+
+	if !objInfo.IsCacheable() || filterFromCache(objInfo.UserDefined) {
+		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
+	}
+
+	if cacheErr == nil {
+		if cacheReader.ObjInfo.ETag == objInfo.ETag && !isStaleCache(objInfo) {
+			// Object is not stale, so serve from cache
+			return cacheReader, nil
+		}
+		cacheReader.Close()
+		// Object is stale, so delete from cache
+		dcache.Delete(ctx, bucket, object)
+	}
+
+	// Since we got here, we are serving the request from backend,
+	// and also adding the object to the cache.
+
+	if rs != nil {
+		// We don't cache partial objects.
+		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
+	}
+	if !dcache.diskAvailable(objInfo.Size) {
+		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
+	}
+
+	bkReader, bkErr := c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
+	if bkErr != nil {
+		return nil, bkErr
+	}
+
+	// Initialize pipe.
+	pipeReader, pipeWriter := io.Pipe()
+	teeReader := io.TeeReader(bkReader, pipeWriter)
+	hashReader, herr := hash.NewReader(pipeReader, bkReader.ObjInfo.Size, "", "", bkReader.ObjInfo.Size)
+	if herr != nil {
+		bkReader.Close()
+		return nil, herr
+	}
+
+	go func() {
+		opts := ObjectOptions{}
+		putErr := dcache.Put(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), c.getMetadata(bkReader.ObjInfo), opts)
+		// close the write end of the pipe, so the error gets
+		// propagated to getObjReader
+		pipeWriter.CloseWithError(putErr)
+	}()
+
+	cleanupBackend := func() { bkReader.Close() }
+	cleanupPipe := func() { pipeReader.Close() }
+	gr = NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, cleanupBackend, cleanupPipe)
+	return gr, nil
+}
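The new GetObjectNInfo serves the object from the backend and fills the cache in a single pass: io.TeeReader mirrors every byte the client reads into an io.Pipe, and a goroutine drains the pipe into dcache.Put. A runnable stdlib sketch of the same shape (serveAndCache and the sink names are invented for illustration):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// serveAndCache streams src to the client while mirroring every byte into
// cache via io.Pipe + io.TeeReader, the same shape as GetObjectNInfo above.
func serveAndCache(src io.Reader, client, cache io.Writer) error {
	pr, pw := io.Pipe()
	tee := io.TeeReader(src, pw)

	done := make(chan error, 1)
	go func() {
		// The cache fill runs concurrently; it only ever sees bytes that
		// the client has already read through the tee.
		_, err := io.Copy(cache, pr)
		done <- err
	}()

	_, err := io.Copy(client, tee)
	pw.CloseWithError(err) // propagate client-side errors to the cache fill
	if cerr := <-done; cerr != nil && err == nil {
		err = cerr
	}
	return err
}

func main() {
	var client, cache bytes.Buffer
	if err := serveAndCache(strings.NewReader("hello disk cache"), &client, &cache); err != nil {
		fmt.Println("error:", err)
	}
	fmt.Println(client.String() == cache.String()) // true
}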
 // Uses cached-object to serve the request. If object is not cached it serves the request from the backend and also
 // stores it in the cache for serving subsequent requests.
-func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error) {
+func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
 	GetObjectFn := c.GetObjectFn
 	GetObjectInfoFn := c.GetObjectInfoFn

 	if c.isCacheExclude(bucket, object) {
-		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
+		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
 	}
 	// fetch cacheFSObjects if object is currently cached or nearest available cache drive
 	dcache, err := c.cache.getCachedFSLoc(ctx, bucket, object)
 	if err != nil {
-		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
+		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
 	}
 	// stat object on backend
-	objInfo, err := GetObjectInfoFn(ctx, bucket, object)
+	objInfo, err := GetObjectInfoFn(ctx, bucket, object, opts)
 	backendDown := backendDownError(err)
 	if err != nil && !backendDown {
 		if _, ok := err.(ObjectNotFound); ok {
@@ -204,43 +284,43 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
 		return err
 	}

-	if !backendDown && filterFromCache(objInfo.UserDefined) {
-		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
+	if !backendDown && !objInfo.IsCacheable() {
+		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
 	}

-	cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object)
+	if !backendDown && filterFromCache(objInfo.UserDefined) {
+		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
+	}
+
+	cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object, opts)
 	if err == nil {
 		if backendDown {
 			// If the backend is down, serve the request from cache.
-			return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag)
+			return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag, opts)
 		}
 		if cachedObjInfo.ETag == objInfo.ETag && !isStaleCache(objInfo) {
-			return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag)
+			return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag, opts)
 		}
 		dcache.Delete(ctx, bucket, object)
 	}
-	if startOffset != 0 || length != objInfo.Size {
+	if startOffset != 0 || (length > 0 && length != objInfo.Size) {
 		// We don't cache partial objects.
-		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
+		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
 	}
-	if !dcache.diskAvailable(objInfo.Size * cacheSizeMultiplier) {
-		// cache only objects < 1/100th of disk capacity
-		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
+	if !dcache.diskAvailable(objInfo.Size) {
+		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
 	}
 	// Initialize pipe.
 	pipeReader, pipeWriter := io.Pipe()
-	hashReader, err := hash.NewReader(pipeReader, objInfo.Size, "", "")
+	hashReader, err := hash.NewReader(pipeReader, objInfo.Size, "", "", objInfo.Size)
 	if err != nil {
 		return err
 	}
 	go func() {
-		if err = GetObjectFn(ctx, bucket, object, 0, objInfo.Size, io.MultiWriter(writer, pipeWriter), etag); err != nil {
-			pipeWriter.CloseWithError(err)
-			return
-		}
-		pipeWriter.Close() // Close writer explicitly signaling we wrote all data.
+		gerr := GetObjectFn(ctx, bucket, object, 0, objInfo.Size, io.MultiWriter(writer, pipeWriter), etag, opts)
+		pipeWriter.CloseWithError(gerr) // Close writer explicitly signaling we wrote all data.
 	}()
-	err = dcache.Put(ctx, bucket, object, hashReader, c.getMetadata(objInfo))
+	err = dcache.Put(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), c.getMetadata(objInfo), opts)
 	if err != nil {
 		return err
 	}
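Both cache-fill paths above lean on io.PipeWriter.CloseWithError to hand a producer-side failure to whoever is reading the other end of the pipe; closing with a nil error is equivalent to a clean Close. A small runnable demonstration of that propagation (illustrative only):

package main

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	pr, pw := io.Pipe()

	// Producer fails mid-stream; CloseWithError hands the failure to the
	// reader, exactly how the cache-fill goroutines surface errors above.
	go func() {
		pw.Write([]byte("partial"))
		pw.CloseWithError(errors.New("backend went away"))
	}()

	_, err := ioutil.ReadAll(pr)
	fmt.Println(err) // backend went away
}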
@@ -249,17 +329,17 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
 }

 // Returns ObjectInfo from cache if available.
-func (c cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string) (ObjectInfo, error) {
+func (c cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
 	getObjectInfoFn := c.GetObjectInfoFn
 	if c.isCacheExclude(bucket, object) {
-		return getObjectInfoFn(ctx, bucket, object)
+		return getObjectInfoFn(ctx, bucket, object, opts)
 	}
 	// fetch cacheFSObjects if object is currently cached or nearest available cache drive
 	dcache, err := c.cache.getCachedFSLoc(ctx, bucket, object)
 	if err != nil {
-		return getObjectInfoFn(ctx, bucket, object)
+		return getObjectInfoFn(ctx, bucket, object, opts)
 	}
-	objInfo, err := getObjectInfoFn(ctx, bucket, object)
+	objInfo, err := getObjectInfoFn(ctx, bucket, object, opts)
 	if err != nil {
 		if _, ok := err.(ObjectNotFound); ok {
 			// Delete the cached entry if backend object was deleted.
@@ -270,14 +350,14 @@ func (c cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string)
 		return ObjectInfo{}, err
 	}
 	// when backend is down, serve from cache.
-	cachedObjInfo, cerr := dcache.GetObjectInfo(ctx, bucket, object)
+	cachedObjInfo, cerr := dcache.GetObjectInfo(ctx, bucket, object, opts)
 	if cerr == nil {
 		return cachedObjInfo, nil
 	}
 	return ObjectInfo{}, BackendDown{}
 	}
 	// when backend is up, do a sanity check on cached object
-	cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object)
+	cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object, opts)
 	if err != nil {
 		return objInfo, nil
 	}
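The sanity check above only trusts a cache entry when its ETag still matches the backend and isStaleCache does not trip; the expiry rule itself is not shown in this hunk. The sketch below assumes a simple max-age rule, and cacheEntry/isFresh are invented names used purely to illustrate the shape of the check:

package main

import (
	"fmt"
	"time"
)

// cacheEntry is a stand-in for the cached ObjectInfo used above; the real
// isStaleCache also honors cache-control response headers.
type cacheEntry struct {
	ETag     string
	CachedAt time.Time
}

// isFresh mirrors the validation in GetObjectInfo: a hit counts only when
// the backend ETag still matches and the entry has not aged out.
func isFresh(e cacheEntry, backendETag string, maxAge time.Duration) bool {
	return e.ETag == backendETag && time.Since(e.CachedAt) < maxAge
}

func main() {
	e := cacheEntry{ETag: "abc123", CachedAt: time.Now().Add(-time.Minute)}
	fmt.Println(isFresh(e, "abc123", time.Hour)) // true: serve from cache
	fmt.Println(isFresh(e, "def456", time.Hour)) // false: delete and refetch
}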
@@ -291,40 +371,32 @@ func (c cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string)
 // Returns function "listDir" of the type listDirFunc.
 // isLeaf - is used by listDir function to check if an entry is a leaf or non-leaf entry.
 // disks - list of fsObjects
-func listDirCacheFactory(isLeaf isLeafFunc, treeWalkIgnoredErrs []error, disks []*cacheFSObjects) listDirFunc {
-	listCacheDirs := func(bucket, prefixDir, prefixEntry string) (dirs []string, err error) {
+func listDirCacheFactory(isLeaf isLeafFunc, disks []*cacheFSObjects) listDirFunc {
+	listCacheDirs := func(bucket, prefixDir, prefixEntry string) (dirs []string) {
 		var entries []string
 		for _, disk := range disks {
+			// ignore disk-caches that might be missing/offline
+			if disk == nil {
+				continue
+			}
-			fs := disk.FSObjects
-			entries, err = readDir(pathJoin(fs.fsPath, bucket, prefixDir))

+			// For any reason disk was deleted or goes offline, continue
+			// and list from other disks if possible.
+			fs := disk.FSObjects
+			var err error
+			entries, err = readDir(pathJoin(fs.fsPath, bucket, prefixDir))
 			if err != nil {
-				if IsErrIgnored(err, treeWalkIgnoredErrs...) {
-					continue
-				}
-				return nil, err
+				continue
 			}

 			// Filter entries that have the prefix prefixEntry.
 			entries = filterMatchingPrefix(entries, prefixEntry)
 			dirs = append(dirs, entries...)
 		}
-		return dirs, nil
+		return dirs
 	}

 	// listDir - lists all the entries at a given prefix and given entry in the prefix.
-	listDir := func(bucket, prefixDir, prefixEntry string) (mergedEntries []string, delayIsLeaf bool, err error) {
-		var cacheEntries []string
-		cacheEntries, err = listCacheDirs(bucket, prefixDir, prefixEntry)
-		if err != nil {
-			return nil, false, err
-		}
+	listDir := func(bucket, prefixDir, prefixEntry string) (mergedEntries []string, delayIsLeaf bool) {
+		cacheEntries := listCacheDirs(bucket, prefixDir, prefixEntry)
 		for _, entry := range cacheEntries {
 			// Find elements in entries which are not in mergedEntries
 			idx := sort.SearchStrings(mergedEntries, entry)
@@ -335,7 +407,7 @@ func listDirCacheFactory(isLeaf isLeafFunc, treeWalkIgnoredErrs []error, disks [
 			mergedEntries = append(mergedEntries, entry)
 			sort.Strings(mergedEntries)
 		}
-		return mergedEntries, false, nil
+		return mergedEntries, false
 	}
 	return listDir
 }

@@ -371,7 +443,7 @@ func (c cacheObjects) listCacheObjects(ctx context.Context, bucket, prefix, mark
 		return fs.isObjectDir(bucket, object)
 	}

-	listDir := listDirCacheFactory(isLeaf, cacheTreeWalkIgnoredErrs, c.cache.cfs)
+	listDir := listDirCacheFactory(isLeaf, c.cache.cfs)
 	walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, isLeaf, isLeafDir, endWalkCh)
 }
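The listDir closure above merges per-drive entries into one sorted slice, using sort.SearchStrings to skip names that are already present. Extracted as a standalone, runnable helper (mergeUnique is an invented name):

package main

import (
	"fmt"
	"sort"
)

// mergeUnique folds entries into a sorted slice, skipping duplicates,
// the same idiom the listDir closure above uses for cache entries.
func mergeUnique(merged []string, entries []string) []string {
	for _, entry := range entries {
		idx := sort.SearchStrings(merged, entry)
		if idx < len(merged) && merged[idx] == entry {
			continue // already present
		}
		merged = append(merged, entry)
		sort.Strings(merged)
	}
	return merged
}

func main() {
	m := mergeUnique(nil, []string{"b/", "a.txt"})
	m = mergeUnique(m, []string{"a.txt", "c.txt"})
	fmt.Println(m) // [a.txt b/ c.txt]
}

Re-sorting after every append keeps the slice ordered but is quadratic in the worst case; the commit keeps the same approach, which is reasonable for the small per-directory entry counts involved.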
@@ -572,42 +644,43 @@ func (c cacheObjects) isCacheExclude(bucket, object string) bool {
 }

 // PutObject - caches the uploaded object for single Put operations
-func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
+func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
 	putObjectFn := c.PutObjectFn
+	data := r.Reader
 	dcache, err := c.cache.getCacheFS(ctx, bucket, object)
 	if err != nil {
 		// disk cache could not be located, execute backend call.
-		return putObjectFn(ctx, bucket, object, r, metadata)
+		return putObjectFn(ctx, bucket, object, r, metadata, opts)
 	}
 	size := r.Size()

 	// fetch from backend if there is no space on cache drive
-	if !dcache.diskAvailable(size * cacheSizeMultiplier) {
-		return putObjectFn(ctx, bucket, object, r, metadata)
+	if !dcache.diskAvailable(size) {
+		return putObjectFn(ctx, bucket, object, r, metadata, opts)
 	}
 	// fetch from backend if cache exclude pattern or cache-control
 	// directive set to exclude
 	if c.isCacheExclude(bucket, object) || filterFromCache(metadata) {
 		dcache.Delete(ctx, bucket, object)
-		return putObjectFn(ctx, bucket, object, r, metadata)
+		return putObjectFn(ctx, bucket, object, r, metadata, opts)
 	}
 	objInfo = ObjectInfo{}
 	// Initialize pipe to stream data to backend
 	pipeReader, pipeWriter := io.Pipe()
-	hashReader, err := hash.NewReader(pipeReader, size, r.MD5HexString(), r.SHA256HexString())
+	hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
 	if err != nil {
 		return ObjectInfo{}, err
 	}
 	// Initialize pipe to stream data to cache
 	rPipe, wPipe := io.Pipe()
-	cHashReader, err := hash.NewReader(rPipe, size, r.MD5HexString(), r.SHA256HexString())
+	cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
 	if err != nil {
 		return ObjectInfo{}, err
 	}
 	oinfoCh := make(chan ObjectInfo)
 	errCh := make(chan error)
 	go func() {
-		oinfo, perr := putObjectFn(ctx, bucket, object, hashReader, metadata)
+		oinfo, perr := putObjectFn(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), metadata, opts)
 		if perr != nil {
 			pipeWriter.CloseWithError(perr)
 			wPipe.CloseWithError(perr)
@@ -620,14 +693,14 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *h
 	}()

 	go func() {
-		if err = dcache.Put(ctx, bucket, object, cHashReader, metadata); err != nil {
+		if err = dcache.Put(ctx, bucket, object, NewPutObjReader(cHashReader, nil, nil), metadata, opts); err != nil {
 			wPipe.CloseWithError(err)
 			return
 		}
 	}()

 	mwriter := io.MultiWriter(pipeWriter, wPipe)
-	_, err = io.Copy(mwriter, r)
+	_, err = io.Copy(mwriter, data)
 	if err != nil {
 		err = <-errCh
 		return objInfo, err
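PutObject above writes one upload to the backend and to the cache at the same time: io.MultiWriter fans the body out into two pipes, one goroutine per sink drains them, and CloseWithError propagates whichever side fails. A self-contained stdlib sketch of that fan-out (fanOut and the sink names are illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// fanOut streams one upload into two sinks at once (backend and cache in
// the PutObject hunk above), failing fast if either side errors.
func fanOut(src io.Reader, backend, cache io.Writer) error {
	bpr, bpw := io.Pipe()
	cpr, cpw := io.Pipe()

	errCh := make(chan error, 2) // buffered so neither goroutine leaks
	go func() {
		_, err := io.Copy(backend, bpr)
		bpr.CloseWithError(err) // unblocks the MultiWriter on failure
		errCh <- err
	}()
	go func() {
		_, err := io.Copy(cache, cpr)
		cpr.CloseWithError(err)
		errCh <- err
	}()

	_, err := io.Copy(io.MultiWriter(bpw, cpw), src)
	bpw.CloseWithError(err)
	cpw.CloseWithError(err)
	if err != nil {
		return err
	}
	// Wait for both sinks to drain.
	for i := 0; i < 2; i++ {
		if werr := <-errCh; werr != nil {
			err = werr
		}
	}
	return err
}

func main() {
	var b, c bytes.Buffer
	err := fanOut(strings.NewReader("object bytes"), &b, &c)
	fmt.Println(err, b.String() == c.String()) // <nil> true
}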
@@ -639,68 +712,69 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *h
 }

 // NewMultipartUpload - Starts a new multipart upload operation to backend and cache.
-func (c cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
+func (c cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
 	newMultipartUploadFn := c.NewMultipartUploadFn

 	if c.isCacheExclude(bucket, object) || filterFromCache(metadata) {
-		return newMultipartUploadFn(ctx, bucket, object, metadata)
+		return newMultipartUploadFn(ctx, bucket, object, metadata, opts)
 	}

 	dcache, err := c.cache.getCacheFS(ctx, bucket, object)
 	if err != nil {
 		// disk cache could not be located, execute backend call.
-		return newMultipartUploadFn(ctx, bucket, object, metadata)
+		return newMultipartUploadFn(ctx, bucket, object, metadata, opts)
 	}

-	uploadID, err = newMultipartUploadFn(ctx, bucket, object, metadata)
+	uploadID, err = newMultipartUploadFn(ctx, bucket, object, metadata, opts)
 	if err != nil {
 		return
 	}
 	// create new multipart upload in cache with same uploadID
-	dcache.NewMultipartUpload(ctx, bucket, object, metadata, uploadID)
+	dcache.NewMultipartUpload(ctx, bucket, object, metadata, uploadID, opts)
 	return uploadID, err
 }

 // PutObjectPart - uploads part to backend and cache simultaneously.
-func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error) {
+func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
+	data := r.Reader
 	putObjectPartFn := c.PutObjectPartFn
 	dcache, err := c.cache.getCacheFS(ctx, bucket, object)
 	if err != nil {
 		// disk cache could not be located, execute backend call.
-		return putObjectPartFn(ctx, bucket, object, uploadID, partID, data)
+		return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
 	}

 	if c.isCacheExclude(bucket, object) {
-		return putObjectPartFn(ctx, bucket, object, uploadID, partID, data)
+		return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
 	}

-	// make sure cache has at least cacheSizeMultiplier * size available
+	// make sure cache has at least size space available
 	size := data.Size()
-	if !dcache.diskAvailable(size * cacheSizeMultiplier) {
+	if !dcache.diskAvailable(size) {
 		select {
 		case dcache.purgeChan <- struct{}{}:
 		default:
 		}
-		return putObjectPartFn(ctx, bucket, object, uploadID, partID, data)
+		return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
 	}

 	info = PartInfo{}
 	// Initialize pipe to stream data to backend
 	pipeReader, pipeWriter := io.Pipe()
-	hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString())
+	hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
 	if err != nil {
 		return
 	}
 	// Initialize pipe to stream data to cache
 	rPipe, wPipe := io.Pipe()
-	cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString())
+	cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
 	if err != nil {
 		return
 	}
 	pinfoCh := make(chan PartInfo)
 	errorCh := make(chan error)
 	go func() {
-		info, err = putObjectPartFn(ctx, bucket, object, uploadID, partID, hashReader)
+		info, err = putObjectPartFn(ctx, bucket, object, uploadID, partID, NewPutObjReader(hashReader, nil, nil), opts)
 		if err != nil {
 			close(pinfoCh)
 			pipeWriter.CloseWithError(err)
@@ -712,7 +786,7 @@ func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadI
 		pinfoCh <- info
 	}()
 	go func() {
-		if _, perr := dcache.PutObjectPart(ctx, bucket, object, uploadID, partID, cHashReader); perr != nil {
+		if _, perr := dcache.PutObjectPart(ctx, bucket, object, uploadID, partID, NewPutObjReader(cHashReader, nil, nil), opts); perr != nil {
 			wPipe.CloseWithError(perr)
 			return
 		}
@@ -754,25 +828,25 @@ func (c cacheObjects) AbortMultipartUpload(ctx context.Context, bucket, object,
 }

 // CompleteMultipartUpload - completes multipart upload operation on backend and cache.
-func (c cacheObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
+func (c cacheObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
 	completeMultipartUploadFn := c.CompleteMultipartUploadFn

 	if c.isCacheExclude(bucket, object) {
-		return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
+		return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
 	}

 	dcache, err := c.cache.getCacheFS(ctx, bucket, object)
 	if err != nil {
 		// disk cache could not be located, execute backend call.
-		return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
+		return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
 	}
 	// perform backend operation
-	objInfo, err = completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
+	objInfo, err = completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
 	if err != nil {
 		return
 	}
 	// also complete the multipart upload in cache with the same uploadID
-	dcache.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts)
+	dcache.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
 	return
 }
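When the cache drive is low on space, PutObjectPart above nudges the purge goroutine with a non-blocking send: if a purge is already pending, the default case drops the extra signal instead of stalling the upload path. The idiom in isolation (names are illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	purgeCh := make(chan struct{}, 1)

	// Consumer: one long-running purge loop, like the cache GC.
	go func() {
		for range purgeCh {
			fmt.Println("purging least recently used entries...")
			time.Sleep(10 * time.Millisecond)
		}
	}()

	// Producer: signal without ever blocking. If a purge is already
	// pending, the default branch drops the duplicate signal.
	for i := 0; i < 3; i++ {
		select {
		case purgeCh <- struct{}{}:
		default:
		}
	}
	time.Sleep(50 * time.Millisecond) // give the purge goroutine time to run (demo only)
}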
@@ -888,14 +962,17 @@ func newServerCacheObjects(config CacheConfig) (CacheObjectLayer, error) {
 		cache:    dcache,
 		exclude:  config.Exclude,
 		listPool: newTreeWalkPool(globalLookupTimeout),
-		GetObjectFn: func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
-			return newObjectLayerFn().GetObject(ctx, bucket, object, startOffset, length, writer, etag)
+		GetObjectFn: func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
+			return newObjectLayerFn().GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
 		},
-		GetObjectInfoFn: func(ctx context.Context, bucket, object string) (ObjectInfo, error) {
-			return newObjectLayerFn().GetObjectInfo(ctx, bucket, object)
+		GetObjectInfoFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
+			return newObjectLayerFn().GetObjectInfo(ctx, bucket, object, opts)
 		},
-		PutObjectFn: func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
-			return newObjectLayerFn().PutObject(ctx, bucket, object, data, metadata)
+		GetObjectNInfoFn: func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
+			return newObjectLayerFn().GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
+		},
+		PutObjectFn: func(ctx context.Context, bucket, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
+			return newObjectLayerFn().PutObject(ctx, bucket, object, data, metadata, opts)
 		},
 		DeleteObjectFn: func(ctx context.Context, bucket, object string) error {
 			return newObjectLayerFn().DeleteObject(ctx, bucket, object)
@@ -912,17 +989,17 @@ func newServerCacheObjects(config CacheConfig) (CacheObjectLayer, error) {
 		GetBucketInfoFn: func(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
 			return newObjectLayerFn().GetBucketInfo(ctx, bucket)
 		},
-		NewMultipartUploadFn: func(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
-			return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, metadata)
+		NewMultipartUploadFn: func(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
+			return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, metadata, opts)
 		},
-		PutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error) {
-			return newObjectLayerFn().PutObjectPart(ctx, bucket, object, uploadID, partID, data)
+		PutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
+			return newObjectLayerFn().PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
 		},
 		AbortMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string) error {
 			return newObjectLayerFn().AbortMultipartUpload(ctx, bucket, object, uploadID)
 		},
-		CompleteMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
-			return newObjectLayerFn().CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts)
+		CompleteMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
+			return newObjectLayerFn().CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
 		},
 		DeleteBucketFn: func(ctx context.Context, bucket string) error {
 			return newObjectLayerFn().DeleteBucket(ctx, bucket)
@@ -19,7 +19,6 @@ package cmd
 import (
 	"bytes"
 	"context"
-	"os"
 	"reflect"
 	"testing"
 	"time"
@@ -28,21 +27,15 @@
 )

 // Initialize cache FS objects.
-func initCacheFSObjects(disk string, cacheMaxUse int, t *testing.T) (*cacheFSObjects, error) {
-	newTestConfig(globalMinioDefaultRegion)
-	var err error
-	obj, err := newCacheFSObjects(disk, globalCacheExpiry, cacheMaxUse)
-	if err != nil {
-		t.Fatal(err)
-	}
-	return obj, nil
+func initCacheFSObjects(disk string, cacheMaxUse int) (*cacheFSObjects, error) {
+	return newCacheFSObjects(disk, globalCacheExpiry, cacheMaxUse)
 }

 // inits diskCache struct for nDisks
 func initDiskCaches(drives []string, cacheMaxUse int, t *testing.T) (*diskCache, error) {
 	var cfs []*cacheFSObjects
 	for _, d := range drives {
-		obj, err := initCacheFSObjects(d, cacheMaxUse, t)
+		obj, err := initCacheFSObjects(d, cacheMaxUse)
 		if err != nil {
 			return nil, err
 		}
@@ -131,11 +124,6 @@ func TestGetCacheFSMaxUse(t *testing.T) {

 // test wildcard patterns for excluding entries from cache
 func TestCacheExclusion(t *testing.T) {
-	rootPath, err := newTestConfig(globalMinioDefaultRegion)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(rootPath)
 	fsDirs, err := getRandomDisks(1)
 	if err != nil {
 		t.Fatal(err)
@@ -146,7 +134,7 @@ func TestCacheExclusion(t *testing.T) {
 		t.Fatal(err)
 	}
 	cobj := cobjects.(*cacheObjects)
-	globalServiceDoneCh <- struct{}{}
+	GlobalServiceDoneCh <- struct{}{}
 	testCases := []struct {
 		bucketName string
 		objectName string
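TestCacheExclusion above is table-driven: each case pairs a bucket and object name with the expected exclusion verdict. A minimal standalone version of the idea, using the stdlib path.Match in place of minio's own wildcard matcher (the pattern semantics differ slightly; everything here is illustrative):

package main

import (
	"fmt"
	"path"
)

// excluded reports whether bucket/object matches any exclusion pattern.
// path.Match stands in for minio's wildcard matcher in this sketch.
func excluded(patterns []string, bucket, object string) bool {
	for _, p := range patterns {
		if ok, _ := path.Match(p, bucket+"/"+object); ok {
			return true
		}
	}
	return false
}

func main() {
	testCases := []struct {
		bucketName string
		objectName string
		want       bool
	}{
		{"testbucket", "archive/file.zip", true},
		{"testbucket", "readme.md", false},
	}
	patterns := []string{"*/archive/*"}
	for _, tc := range testCases {
		got := excluded(patterns, tc.bucketName, tc.objectName)
		fmt.Println(got == tc.want)
	}
}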
@@ -204,17 +192,18 @@ func TestDiskCache(t *testing.T) {
 	objInfo.ContentType = contentType
 	objInfo.ETag = etag
 	objInfo.UserDefined = httpMeta
+	opts := ObjectOptions{}

 	byteReader := bytes.NewReader([]byte(content))
-	hashReader, err := hash.NewReader(byteReader, int64(size), "", "")
+	hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size))
 	if err != nil {
 		t.Fatal(err)
 	}
-	err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta)
+	err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), httpMeta, opts)
 	if err != nil {
 		t.Fatal(err)
 	}
-	cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName)
+	cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName, opts)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -231,7 +220,7 @@ func TestDiskCache(t *testing.T) {
 		t.Fatal("Cached content-type does not match")
 	}
 	writer := bytes.NewBuffer(nil)
-	err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "")
+	err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "", opts)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -278,23 +267,24 @@ func TestDiskCacheMaxUse(t *testing.T) {
 	objInfo.ContentType = contentType
 	objInfo.ETag = etag
 	objInfo.UserDefined = httpMeta
+	opts := ObjectOptions{}

 	byteReader := bytes.NewReader([]byte(content))
-	hashReader, err := hash.NewReader(byteReader, int64(size), "", "")
+	hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size))
 	if err != nil {
 		t.Fatal(err)
 	}
 	if !cache.diskAvailable(int64(size)) {
-		err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta)
+		err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), httpMeta, opts)
 		if err != errDiskFull {
 			t.Fatal("Cache max-use limit violated.")
 		}
 	} else {
-		err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta)
+		err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), httpMeta, opts)
 		if err != nil {
 			t.Fatal(err)
 		}
-		cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName)
+		cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName, opts)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -311,7 +301,7 @@ func TestDiskCacheMaxUse(t *testing.T) {
 		t.Fatal("Cached content-type does not match")
 	}
 	writer := bytes.NewBuffer(nil)
-	err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "", opts)
+	err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "", opts)
 	if err != nil {
 		t.Fatal(err)
 	}
cmd/dummy-data-generator_test.go (new file, 182 lines)
@@ -0,0 +1,182 @@
/*
 * Minio Cloud Storage, (C) 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"testing"
)

var alphabets = []byte("abcdefghijklmnopqrstuvwxyz0123456789")

// DummyDataGen returns a reader that repeats the bytes in `alphabets`
// up to the desired length.
type DummyDataGen struct {
	b           []byte
	idx, length int64
}

// NewDummyDataGen returns a ReadSeeker over the first `totalLength`
// bytes from the infinite stream consisting of repeated
// concatenations of `alphabets`.
//
// The skipOffset (generally = 0) can be used to skip a given number
// of bytes from the beginning of the infinite stream. This is useful
// to compare such streams of bytes that may be split up, because:
//
// Given the function:
//
//     f := func(r io.Reader) string {
//         b, _ := ioutil.ReadAll(r)
//         return string(b)
//     }
//
// for example, the following is true:
//
//     f(NewDummyDataGen(100, 0)) == f(NewDummyDataGen(50, 0)) + f(NewDummyDataGen(50, 50))
func NewDummyDataGen(totalLength, skipOffset int64) io.ReadSeeker {
	if totalLength < 0 {
		panic("Negative length passed to DummyDataGen!")
	}
	if skipOffset < 0 {
		panic("Negative rotations are not allowed")
	}

	skipOffset = skipOffset % int64(len(alphabets))
	as := make([]byte, 2*len(alphabets))
	copy(as, alphabets)
	copy(as[len(alphabets):], alphabets)
	b := as[skipOffset : skipOffset+int64(len(alphabets))]
	return &DummyDataGen{
		length: totalLength,
		b:      b,
	}
}

func (d *DummyDataGen) Read(b []byte) (n int, err error) {
	k := len(b)
	numLetters := int64(len(d.b))
	for k > 0 && d.idx < d.length {
		w := copy(b[len(b)-k:], d.b[d.idx%numLetters:])
		k -= w
		d.idx += int64(w)
		n += w
	}
	if d.idx >= d.length {
		extraBytes := d.idx - d.length
		n -= int(extraBytes)
		if n < 0 {
			n = 0
		}
		err = io.EOF
	}
	return
}

func (d *DummyDataGen) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	case io.SeekStart:
		if offset < 0 {
			return 0, errors.New("Invalid offset")
		}
		d.idx = offset
	case io.SeekCurrent:
		if d.idx+offset < 0 {
			return 0, errors.New("Invalid offset")
		}
		d.idx += offset
	case io.SeekEnd:
		if d.length+offset < 0 {
			return 0, errors.New("Invalid offset")
		}
		d.idx = d.length + offset
	}
	return d.idx, nil
}

func TestDummyDataGenerator(t *testing.T) {
	readAll := func(r io.Reader) string {
		b, _ := ioutil.ReadAll(r)
		return string(b)
	}
	checkEq := func(a, b string) {
		if a != b {
			t.Fatalf("Unexpected equality failure")
		}
	}

	checkEq(readAll(NewDummyDataGen(0, 0)), "")

	checkEq(readAll(NewDummyDataGen(10, 0)), readAll(NewDummyDataGen(10, int64(len(alphabets)))))

	checkEq(readAll(NewDummyDataGen(100, 0)), readAll(NewDummyDataGen(50, 0))+readAll(NewDummyDataGen(50, 50)))

	r := NewDummyDataGen(100, 0)
	r.Seek(int64(len(alphabets)), 0)
	checkEq(readAll(r), readAll(NewDummyDataGen(100-int64(len(alphabets)), 0)))
}

// Compares all the bytes returned by the given readers. Any Read
// errors cause a `false` result. A string describing the error is
// also returned.
func cmpReaders(r1, r2 io.Reader) (bool, string) {
	bufLen := 32 * 1024
	b1, b2 := make([]byte, bufLen), make([]byte, bufLen)
	for i := 0; true; i++ {
		n1, e1 := io.ReadFull(r1, b1)
		n2, e2 := io.ReadFull(r2, b2)
		if n1 != n2 {
			return false, fmt.Sprintf("Read %d != %d bytes from the readers", n1, n2)
		}
		if !bytes.Equal(b1[:n1], b2[:n2]) {
			return false, fmt.Sprintf("After reading %d equal buffers (32KiB each), we got the following two strings:\n%v\n%v\n",
				i, b1, b2)
		}
		// Check if stream has ended
		if (e1 == io.ErrUnexpectedEOF && e2 == io.ErrUnexpectedEOF) || (e1 == io.EOF && e2 == io.EOF) {
			break
		}
		if e1 != nil || e2 != nil {
			return false, fmt.Sprintf("Got unexpected error values: %v == %v", e1, e2)
		}
	}
	return true, ""
}

func TestCmpReaders(t *testing.T) {
	{
		r1 := bytes.NewBuffer([]byte("abc"))
		r2 := bytes.NewBuffer([]byte("abc"))
		ok, msg := cmpReaders(r1, r2)
		if !(ok && msg == "") {
			t.Fatalf("unexpected")
		}
	}

	{
		r1 := bytes.NewBuffer([]byte("abc"))
		r2 := bytes.NewBuffer([]byte("abcd"))
		ok, _ := cmpReaders(r1, r2)
		if ok {
			t.Fatalf("unexpected")
		}
	}
}
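A usage sketch showing how the two helpers in this file compose, assuming it sits in the same package next to them (not part of the commit): NewDummyDataGen's skipOffset lets a split stream be reassembled and checked against the whole with cmpReaders.

// ExampleSplitStream reassembles a split dummy stream and verifies it
// byte-for-byte against the unsplit original. "fmt" and "io" are already
// imported by this file.
func ExampleSplitStream() {
	whole := NewDummyDataGen(100, 0)
	firstHalf := NewDummyDataGen(50, 0)
	secondHalf := NewDummyDataGen(50, 50) // skipOffset resumes where the first half ended

	ok, msg := cmpReaders(whole, io.MultiReader(firstHalf, secondHalf))
	if !ok {
		fmt.Println("mismatch:", msg)
	}
	fmt.Println(ok)
	// Output: true
}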
Some files were not shown because too many files have changed in this diff.