Compare commits
756 Commits
RELEASE.20
...
RELEASE.20
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f90422a890 | ||
|
|
8befedef14 | ||
|
|
2af3004409 | ||
|
|
38ee40d59c | ||
|
|
d583f1ac0e | ||
|
|
2bcb02f628 | ||
|
|
4301a33371 | ||
|
|
f1a03a4ee8 | ||
|
|
167ddf9c9c | ||
|
|
f833e41e69 | ||
|
|
3780ec699f | ||
|
|
41688a936b | ||
|
|
b2db8123ec | ||
|
|
b330c2c57e | ||
|
|
231c5cf6de | ||
|
|
7214a0160a | ||
|
|
375b79f11b | ||
|
|
661068d1a2 | ||
|
|
3da1869d5e | ||
|
|
000a7aa094 | ||
|
|
7cedc5369d | ||
|
|
e5ecd20d44 | ||
|
|
53aaa5d2a5 | ||
|
|
cccf2de129 | ||
|
|
9d39fb3604 | ||
|
|
4a007e3767 | ||
|
|
95814359bd | ||
|
|
de6c286258 | ||
|
|
d0ae69087c | ||
|
|
5e15b0b844 | ||
|
|
7ea026ff1d | ||
|
|
6e0575a53d | ||
|
|
fb9be81fab | ||
|
|
60791d6dd1 | ||
|
|
eba423bb9d | ||
|
|
301de169e9 | ||
|
|
1868c7016d | ||
|
|
0c71ce3398 | ||
|
|
bc285cf0dd | ||
|
|
209a3ee7af | ||
|
|
7d19ab9f62 | ||
|
|
3f6d624c7b | ||
|
|
c138272d63 | ||
|
|
7dbfea1353 | ||
|
|
c121d27f31 | ||
|
|
43c19a6b82 | ||
|
|
6542bc4a03 | ||
|
|
bede525dc9 | ||
|
|
e45c90060f | ||
|
|
7d79c723e5 | ||
|
|
5f6d6c3b70 | ||
|
|
d15042470e | ||
|
|
f1f414ca59 | ||
|
|
cdf4815a6b | ||
|
|
fade056244 | ||
|
|
a546047c95 | ||
|
|
ea210319ce | ||
|
|
2896e780ae | ||
|
|
eaafb23535 | ||
|
|
baa30f4289 | ||
|
|
2164984d2b | ||
|
|
189c861835 | ||
|
|
6656fa3066 | ||
|
|
0cc2ed04f5 | ||
|
|
b11adfa5cd | ||
|
|
9baeda781a | ||
|
|
bd032d13ff | ||
|
|
730775bb4e | ||
|
|
d31eaddba3 | ||
|
|
3202f78f0f | ||
|
|
6de410a0aa | ||
|
|
a3f41c7049 | ||
|
|
1847f17f50 | ||
|
|
1bc32215b9 | ||
|
|
96009975d6 | ||
|
|
de9b391db3 | ||
|
|
a62572fb86 | ||
|
|
011a2c0b78 | ||
|
|
daf4418cbb | ||
|
|
0f8df3d340 | ||
|
|
9cac385aec | ||
|
|
814ddc0923 | ||
|
|
247795dd36 | ||
|
|
dfadf70a7f | ||
|
|
d348ec0f6c | ||
|
|
b730bd1396 | ||
|
|
56e0c6adf8 | ||
|
|
f44a960dcd | ||
|
|
6c1bbf918d | ||
|
|
48e614b167 | ||
|
|
fe8d33452b | ||
|
|
c19ece6921 | ||
|
|
216fa57b88 | ||
|
|
a9558ae248 | ||
|
|
ee9077db7d | ||
|
|
9c85928740 | ||
|
|
af0309371e | ||
|
|
ead3c186a6 | ||
|
|
6ac48a65cb | ||
|
|
2ecf5ba1de | ||
|
|
94f1a1dea3 | ||
|
|
c045ae15e7 | ||
|
|
1756b7c6ff | ||
|
|
e25ace2151 | ||
|
|
a8e5a86fa0 | ||
|
|
f8edc233ab | ||
|
|
337c2a7cb4 | ||
|
|
2d735144b9 | ||
|
|
0113035237 | ||
|
|
52a1d248b2 | ||
|
|
b5ed42c845 | ||
|
|
d9e7cadacf | ||
|
|
36e88cbd50 | ||
|
|
6d76efb9bb | ||
|
|
a1de9cec58 | ||
|
|
6885c72f32 | ||
|
|
0f1389e992 | ||
|
|
5bf3eeaa77 | ||
|
|
9dda1fd624 | ||
|
|
2dc46cb153 | ||
|
|
0674c0075e | ||
|
|
518ef670da | ||
|
|
0dd626ec67 | ||
|
|
53f4c0fdc0 | ||
|
|
7e3ea77fdf | ||
|
|
7290d23b26 | ||
|
|
24f20eb1bd | ||
|
|
4c9de098b0 | ||
|
|
a2ccba69e5 | ||
|
|
8eb99d3a87 | ||
|
|
3773874cd3 | ||
|
|
6c62b1a2ea | ||
|
|
b768645fde | ||
|
|
7b58dcb28c | ||
|
|
fea4a1e68e | ||
|
|
a9e83dd42c | ||
|
|
145f501a21 | ||
|
|
9b3b04ecec | ||
|
|
3e063cca5c | ||
|
|
27d716c663 | ||
|
|
fbd15cb7b7 | ||
|
|
f7c91eff54 | ||
|
|
a6bdc086a2 | ||
|
|
1242dd951a | ||
|
|
d1c8e9f31b | ||
|
|
83ccae6c8b | ||
|
|
086be07bf5 | ||
|
|
28f9c477a8 | ||
|
|
09571d03a5 | ||
|
|
71ce63f79c | ||
|
|
5205c9591f | ||
|
|
9a547dcbfb | ||
|
|
27632ca6ec | ||
|
|
c7470e6e6e | ||
|
|
d090a17ed0 | ||
|
|
c2529260e7 | ||
|
|
da87188ff8 | ||
|
|
d099039f5d | ||
|
|
5dd9cf4398 | ||
|
|
ab77b216d1 | ||
|
|
b37a02cddf | ||
|
|
c3c3e9087b | ||
|
|
7ad6bc955f | ||
|
|
7a5271ad96 | ||
|
|
1b122526aa | ||
|
|
a3b266761e | ||
|
|
498389123e | ||
|
|
bc61417284 | ||
|
|
97d952e61c | ||
|
|
073aac3d92 | ||
|
|
eff4127efd | ||
|
|
b1c0c32ba6 | ||
|
|
f14bf25cb9 | ||
|
|
f216670814 | ||
|
|
6ecc98fddb | ||
|
|
4843affd0e | ||
|
|
558785a4bb | ||
|
|
60d415bb8a | ||
|
|
45e22cf8aa | ||
|
|
e4900b99d7 | ||
|
|
20766069a8 | ||
|
|
e8160c9fae | ||
|
|
957ecb1b64 | ||
|
|
ac5061df2c | ||
|
|
cddb2714ef | ||
|
|
d7d9cac20b | ||
|
|
6817c5ea58 | ||
|
|
4cd6ca02c7 | ||
|
|
a5efcbab51 | ||
|
|
85be7b39ac | ||
|
|
ebf3dda449 | ||
|
|
582953260b | ||
|
|
2d1ea86fc6 | ||
|
|
322385f1b6 | ||
|
|
1b38aed05f | ||
|
|
a69c98e394 | ||
|
|
282c9f790a | ||
|
|
2eeb0e6a0b | ||
|
|
3ff5bf2369 | ||
|
|
69ee28a082 | ||
|
|
d02deff3d7 | ||
|
|
3e78ea8acc | ||
|
|
b54c0f0ef3 | ||
|
|
c79358c67e | ||
|
|
75107d7698 | ||
|
|
c4464e36c8 | ||
|
|
d92db198d1 | ||
|
|
8bae956df6 | ||
|
|
7758524703 | ||
|
|
c82fa2c829 | ||
|
|
a51280fd20 | ||
|
|
bd437c1c17 | ||
|
|
69fb68ef0b | ||
|
|
787dbaff36 | ||
|
|
c50ae1fdbe | ||
|
|
bde0f444db | ||
|
|
6a8298b137 | ||
|
|
f19cbfad5c | ||
|
|
78f2183e70 | ||
|
|
5c11a46412 | ||
|
|
8a94aebdb8 | ||
|
|
ec11e99667 | ||
|
|
37d066b563 | ||
|
|
bfec5fe200 | ||
|
|
525287f4b6 | ||
|
|
9054ce73b2 | ||
|
|
d079adc167 | ||
|
|
a9d401ac10 | ||
|
|
1fa65c7f2f | ||
|
|
cc9b63eb51 | ||
|
|
7e12eab3ad | ||
|
|
fa685d7d9c | ||
|
|
4314ee1670 | ||
|
|
7d636a7c13 | ||
|
|
bf9d51cf14 | ||
|
|
29e0727b58 | ||
|
|
c434dff0a4 | ||
|
|
b2a8cb4aba | ||
|
|
b412a222ae | ||
|
|
79bcb705bf | ||
|
|
9f81d014f1 | ||
|
|
3184205519 | ||
|
|
7c919329e8 | ||
|
|
db41953618 | ||
|
|
cea078a593 | ||
|
|
f44cfb2863 | ||
|
|
1b45be0d60 | ||
|
|
6bb693488c | ||
|
|
e20e08d700 | ||
|
|
ac07df2985 | ||
|
|
2054ca5c9a | ||
|
|
e51e465543 | ||
|
|
2bbc6a83e8 | ||
|
|
a78731a3ba | ||
|
|
f4e779c964 | ||
|
|
a973402821 | ||
|
|
44decbeae0 | ||
|
|
3ea1be3c52 | ||
|
|
2642e12d14 | ||
|
|
e7276b7b9b | ||
|
|
e375341c33 | ||
|
|
2c20716f37 | ||
|
|
928f5b0564 | ||
|
|
91f21ddc47 | ||
|
|
43a3778b45 | ||
|
|
b9b1bfefe7 | ||
|
|
05cda35b14 | ||
|
|
2155e74951 | ||
|
|
4714958e99 | ||
|
|
c6e62b9175 | ||
|
|
ab66b23194 | ||
|
|
73f9d8a636 | ||
|
|
541a778d7b | ||
|
|
d49f2ec19c | ||
|
|
8dd63a462f | ||
|
|
9902c9baaa | ||
|
|
7de29e6e6b | ||
|
|
336460f67e | ||
|
|
95e89f1712 | ||
|
|
886ae15464 | ||
|
|
d8af244708 | ||
|
|
c8243706b4 | ||
|
|
30707659b5 | ||
|
|
90c365a174 | ||
|
|
7b732b566f | ||
|
|
ba52a925f9 | ||
|
|
fdda5f98c6 | ||
|
|
fa4d627b57 | ||
|
|
2c3e34f001 | ||
|
|
7f8f1ad4e3 | ||
|
|
6f992134a2 | ||
|
|
0c80bf45d0 | ||
|
|
2777956581 | ||
|
|
b207520d98 | ||
|
|
2196fd9cd5 | ||
|
|
ef6304c5c2 | ||
|
|
6b984410d5 | ||
|
|
813e0fc1a8 | ||
|
|
38cf263409 | ||
|
|
6f6a2214fc | ||
|
|
791821d590 | ||
|
|
9a951da881 | ||
|
|
e7a0be5bd3 | ||
|
|
ff932ca2a0 | ||
|
|
818d3bcaf5 | ||
|
|
45b1c66195 | ||
|
|
da04cb91ce | ||
|
|
cfc9cfd84a | ||
|
|
ea18e51f4d | ||
|
|
3d3beb6a9d | ||
|
|
27b8f18cce | ||
|
|
bf545dc320 | ||
|
|
f001e99fcd | ||
|
|
b4bfdc92cc | ||
|
|
ae654831aa | ||
|
|
1ffa983a9d | ||
|
|
ecf1566266 | ||
|
|
c5b87f93dd | ||
|
|
b1a2169dcc | ||
|
|
d45a1808f2 | ||
|
|
db2155551a | ||
|
|
09d35d3b4c | ||
|
|
5b9342d35c | ||
|
|
8d98662633 | ||
|
|
7fdeb44372 | ||
|
|
59dced8237 | ||
|
|
496f4a7dc7 | ||
|
|
8b880a246a | ||
|
|
eeb5942b6b | ||
|
|
7ec904d67b | ||
|
|
c9212819af | ||
|
|
10fd53d6bb | ||
|
|
3fea1d5e35 | ||
|
|
35ecc04223 | ||
|
|
3ca9f5ffa3 | ||
|
|
2e9fed1a14 | ||
|
|
6b92f3fd99 | ||
|
|
06e30b5aa1 | ||
|
|
603cf2a8bb | ||
|
|
a54cdb9587 | ||
|
|
ed4bd20a7c | ||
|
|
cfd12914e1 | ||
|
|
c55aeaf814 | ||
|
|
fdf65aa9b9 | ||
|
|
69b2aacf5a | ||
|
|
0af62d35a0 | ||
|
|
7c32f3f554 | ||
|
|
cec8cdb35e | ||
|
|
5ab9cc029d | ||
|
|
667f42515a | ||
|
|
3614cb7a8b | ||
|
|
b809c84338 | ||
|
|
33edb072a3 | ||
|
|
6a00eb10bf | ||
|
|
792ee48d2c | ||
|
|
52873ac3a3 | ||
|
|
88ae0f1196 | ||
|
|
23a0415eb7 | ||
|
|
75a0661213 | ||
|
|
a1c7c9ea73 | ||
|
|
7f19a9a617 | ||
|
|
2f2c7d91a8 | ||
|
|
9ad1c2d07d | ||
|
|
07a7f329e7 | ||
|
|
c7ca791c58 | ||
|
|
d9be8bc693 | ||
|
|
9fc7537f2a | ||
|
|
f1b2462193 | ||
|
|
8fbf2b0b2a | ||
|
|
c93157019f | ||
|
|
e3b44c3829 | ||
|
|
978bd4e2c4 | ||
|
|
bb942c7311 | ||
|
|
5d25b10f72 | ||
|
|
eac02c04f7 | ||
|
|
1330e59307 | ||
|
|
2dd14c0b89 | ||
|
|
5b8975bf4b | ||
|
|
6f66f1a910 | ||
|
|
deb3911f5e | ||
|
|
23a8411732 | ||
|
|
ece0d4ac53 | ||
|
|
4c92bec619 | ||
|
|
dcd63b4146 | ||
|
|
224b4f13b8 | ||
|
|
51a9d1bdb7 | ||
|
|
b2db1e96e2 | ||
|
|
ab7d3cd508 | ||
|
|
852fb320f7 | ||
|
|
8fb37a8417 | ||
|
|
d4dcf1d722 | ||
|
|
02a59a04d1 | ||
|
|
16a6e68d7b | ||
|
|
1b427ddb69 | ||
|
|
02acff7fac | ||
|
|
712e82344c | ||
|
|
9f298d2311 | ||
|
|
716a52f261 | ||
|
|
e4020fb41f | ||
|
|
d1144c2c7e | ||
|
|
78125ee853 | ||
|
|
9ecd66007f | ||
|
|
64ec17b463 | ||
|
|
6b9805e891 | ||
|
|
013773065c | ||
|
|
7d6766adc6 | ||
|
|
c56c2f5fd3 | ||
|
|
7e819d00ea | ||
|
|
33767266e7 | ||
|
|
63be4709b7 | ||
|
|
6b1f2fc133 | ||
|
|
f749a9bf0e | ||
|
|
9b4d46a6ed | ||
|
|
502e652b7a | ||
|
|
de924605a1 | ||
|
|
15e2ea2c96 | ||
|
|
07edb7c7f8 | ||
|
|
d0cea7adea | ||
|
|
2165d45d3f | ||
|
|
6d5d77f62c | ||
|
|
49df290270 | ||
|
|
b1bfd75fcf | ||
|
|
e5951e30d0 | ||
|
|
45d725c0a3 | ||
|
|
c2c5b09bb1 | ||
|
|
086fbb745e | ||
|
|
4f37c8ccf2 | ||
|
|
026265f8f7 | ||
|
|
f91c072f61 | ||
|
|
52bdbcd046 | ||
|
|
301c50b721 | ||
|
|
e9c111c8d0 | ||
|
|
278a165674 | ||
|
|
a05af9bb98 | ||
|
|
e934c3e2a2 | ||
|
|
2d295a31de | ||
|
|
9bbf5cb74f | ||
|
|
680e493065 | ||
|
|
1ea2449269 | ||
|
|
7ce63b3078 | ||
|
|
7432b5c9b2 | ||
|
|
d76160c245 | ||
|
|
5d838edcef | ||
|
|
c9116e6bd7 | ||
|
|
d7dc9aaf52 | ||
|
|
bfe8a9bccc | ||
|
|
9990464cd5 | ||
|
|
881e983ed9 | ||
|
|
f6a7d4d29b | ||
|
|
f98616dce7 | ||
|
|
5bd0e95eef | ||
|
|
0414f01b6e | ||
|
|
0cbebf0f57 | ||
|
|
4cb6ebcfa2 | ||
|
|
2232e095d5 | ||
|
|
aae337f5b5 | ||
|
|
a78e5d4763 | ||
|
|
cf37c7997e | ||
|
|
1ffbb5c24c | ||
|
|
b9c48e0ab0 | ||
|
|
fe5d599802 | ||
|
|
55063906b5 | ||
|
|
c7178d2066 | ||
|
|
f14f60a487 | ||
|
|
e2b3c083aa | ||
|
|
3011692d93 | ||
|
|
86252ec7e1 | ||
|
|
ef1aa870c5 | ||
|
|
c260182412 | ||
|
|
61c17c8933 | ||
|
|
dd93eee1e3 | ||
|
|
7a400542ae | ||
|
|
88286cf8d0 | ||
|
|
8cb6184f1d | ||
|
|
b849fd7a75 | ||
|
|
09ee145e9c | ||
|
|
04dfcbfdd7 | ||
|
|
23e46f9dba | ||
|
|
fc5213258e | ||
|
|
005ebbb9b2 | ||
|
|
017067e11f | ||
|
|
2bb69033e5 | ||
|
|
fca4ee84c9 | ||
|
|
60e60f68dd | ||
|
|
ba758361b3 | ||
|
|
c6b218e5df | ||
|
|
c18fbdb29a | ||
|
|
b1ad99edbf | ||
|
|
080e0c2323 | ||
|
|
935546d5ca | ||
|
|
64fde1ab95 | ||
|
|
169e8742fc | ||
|
|
069876e262 | ||
|
|
442e1698cb | ||
|
|
d76518eeb9 | ||
|
|
0879a4f743 | ||
|
|
9be7066715 | ||
|
|
d8660b30cc | ||
|
|
30922148fb | ||
|
|
37b32199e3 | ||
|
|
d74818b227 | ||
|
|
627fdfeab7 | ||
|
|
3320878dfb | ||
|
|
3f20011862 | ||
|
|
5f34b5e6a5 | ||
|
|
9199033db7 | ||
|
|
2bf6cf0e15 | ||
|
|
686d4656de | ||
|
|
5aa5dcdc6d | ||
|
|
0a70bc24ac | ||
|
|
656146b699 | ||
|
|
5e40b9a563 | ||
|
|
89d1221217 | ||
|
|
4cd1bbb50a | ||
|
|
c2cde6beb5 | ||
|
|
abc1c1070a | ||
|
|
fd56aa42a6 | ||
|
|
3d318bae76 | ||
|
|
aa2e89bfe3 | ||
|
|
60813bef29 | ||
|
|
b123be5612 | ||
|
|
933c60bc3a | ||
|
|
796cca4166 | ||
|
|
fe379f9428 | ||
|
|
ae0b165431 | ||
|
|
7a06e158f1 | ||
|
|
5eab3db344 | ||
|
|
9f44fcd540 | ||
|
|
b9b2b37366 | ||
|
|
7f31d933a8 | ||
|
|
6695fd6a61 | ||
|
|
84e55e2e6f | ||
|
|
b00cda8ad4 | ||
|
|
d861edfc00 | ||
|
|
dd311623df | ||
|
|
cb935980a5 | ||
|
|
157721f694 | ||
|
|
97d799b9f0 | ||
|
|
0b7bd024fb | ||
|
|
3af70b36fd | ||
|
|
8eba97da74 | ||
|
|
669c9da85d | ||
|
|
5d09233115 | ||
|
|
b2044dd22f | ||
|
|
c31e67dcce | ||
|
|
cd59a945d8 | ||
|
|
d4a390028a | ||
|
|
c476b27a65 | ||
|
|
eaa838a63f | ||
|
|
4364102363 | ||
|
|
99ad445260 | ||
|
|
54431b3953 | ||
|
|
f68a7005c0 | ||
|
|
725172e13b | ||
|
|
0f092c2be8 | ||
|
|
a3c8ef79a4 | ||
|
|
01468d5a75 | ||
|
|
8f1243986e | ||
|
|
dc1eb57f4e | ||
|
|
586614c73f | ||
|
|
d140074773 | ||
|
|
39face27cf | ||
|
|
e047ac52b8 | ||
|
|
04de3ea4bd | ||
|
|
e6ce9da087 | ||
|
|
c9c0d5eec2 | ||
|
|
63c3114657 | ||
|
|
63ac1d6d18 | ||
|
|
9bb0869b73 | ||
|
|
5f2318567e | ||
|
|
16ac4a3c64 | ||
|
|
c8d82588c2 | ||
|
|
842d0241ed | ||
|
|
1dc5f2d0af | ||
|
|
c10ecacf91 | ||
|
|
1cf3e3b7b5 | ||
|
|
c3d4c1f584 | ||
|
|
471a3a650a | ||
|
|
cc02bf0442 | ||
|
|
39e8e4f4aa | ||
|
|
80558e839d | ||
|
|
ca62ac65d4 | ||
|
|
f5abe4e1f1 | ||
|
|
3211cb5df6 | ||
|
|
abc266caa1 | ||
|
|
c364f0af6c | ||
|
|
555969ee42 | ||
|
|
e2c5d29017 | ||
|
|
fa00a84709 | ||
|
|
d266b3a066 | ||
|
|
db3f41fcb4 | ||
|
|
24fb1bf258 | ||
|
|
8b803491af | ||
|
|
10b2f15f6f | ||
|
|
3e9ab5f4a9 | ||
|
|
46b4dd8e20 | ||
|
|
bf3a97d3aa | ||
|
|
3b67f629a4 | ||
|
|
3c30e4503d | ||
|
|
8390bc26db | ||
|
|
24ad59316d | ||
|
|
be0c8b1ec0 | ||
|
|
476111968a | ||
|
|
f1e2e1cc9e | ||
|
|
97deba2a7c | ||
|
|
3df7285c3c | ||
|
|
d2dc964cb5 | ||
|
|
d8e3de0cae | ||
|
|
c9940d8c3f | ||
|
|
794eb54da8 | ||
|
|
2ab8d5e47f | ||
|
|
947bc8c7d3 | ||
|
|
5d3d57c12a | ||
|
|
0bfd20a8e3 | ||
|
|
db3dbcce3a | ||
|
|
b21835f195 | ||
|
|
c7844fb1fb | ||
|
|
2ff8132e2d | ||
|
|
30e80d0a86 | ||
|
|
87443af49e | ||
|
|
5d65428b29 | ||
|
|
78eb3b78bb | ||
|
|
720442b1a2 | ||
|
|
e542084c37 | ||
|
|
f931fc7bfb | ||
|
|
0a56e33ce1 | ||
|
|
c3771df641 | ||
|
|
890b493a2e | ||
|
|
fb708b6b64 | ||
|
|
f96e902f63 | ||
|
|
06bd1e582a | ||
|
|
fb43d64dc3 | ||
|
|
fd0fa4e5c5 | ||
|
|
4e9de58675 | ||
|
|
9565641b9b | ||
|
|
4da68cfcfc | ||
|
|
a9b87c0a16 | ||
|
|
5ac4b517c9 | ||
|
|
ca96560d56 | ||
|
|
cc1a84b62e | ||
|
|
8392d2f510 | ||
|
|
347b29d059 | ||
|
|
3a34d98db8 | ||
|
|
7cdb67680e | ||
|
|
929951fd49 | ||
|
|
13a3d17321 | ||
|
|
a8e156d6a5 | ||
|
|
c9be601988 | ||
|
|
13e2b97ad9 | ||
|
|
95e5d7a9c3 | ||
|
|
32c200fe12 | ||
|
|
1dd38750f7 | ||
|
|
e3273bc5bf | ||
|
|
26a866a202 | ||
|
|
60690a7e1d | ||
|
|
e9b2bf00ad | ||
|
|
069b8ee8ff | ||
|
|
64759189f5 | ||
|
|
fb48ca5020 | ||
|
|
2dad14974e | ||
|
|
d97d53bddc | ||
|
|
1027afa853 | ||
|
|
aa04f97f95 | ||
|
|
4082764d48 | ||
|
|
822eb5ddc7 | ||
|
|
1c90a6bd49 | ||
|
|
26863009c0 | ||
|
|
26e760ee62 | ||
|
|
1e457dda7e | ||
|
|
26ed9e81b1 | ||
|
|
4e63e0e372 | ||
|
|
07a556a10b | ||
|
|
d28bcb4f84 | ||
|
|
fa325665b1 | ||
|
|
eac518b178 | ||
|
|
9e7a3e6adc | ||
|
|
599aae5ba6 | ||
|
|
8cc5ecec23 | ||
|
|
38e6d911ea | ||
|
|
47b13cdb80 | ||
|
|
51456e6adc | ||
|
|
1f481c0967 | ||
|
|
e31b7cdcd5 | ||
|
|
4cec0501ca | ||
|
|
1b6de05a51 | ||
|
|
4e6bf136a8 | ||
|
|
a2825702f8 | ||
|
|
a49d4a9cb2 | ||
|
|
10b526ed86 | ||
|
|
980bf78b4d | ||
|
|
8aaaa46be9 | ||
|
|
53862d4a28 | ||
|
|
ddc908207d | ||
|
|
ee4a6a823d | ||
|
|
8836d57e3c | ||
|
|
f01d53b20f | ||
|
|
40fcd3dc48 | ||
|
|
1b74ce3924 | ||
|
|
7bf093c06a | ||
|
|
140a7eadb4 | ||
|
|
fce2d6ddd1 | ||
|
|
18cb15559d | ||
|
|
3adc311c1c | ||
|
|
ee05280721 | ||
|
|
c4e2af8ca3 | ||
|
|
5afb1b6747 | ||
|
|
f2cc97a44c | ||
|
|
d48fd6fde9 | ||
|
|
b7ee0bbbc9 | ||
|
|
68a519a468 | ||
|
|
c33bae057f | ||
|
|
d7060c4c32 | ||
|
|
8964ef821f | ||
|
|
bd10640846 | ||
|
|
175b07d6e4 | ||
|
|
f16df2a4e7 | ||
|
|
1c90485b56 | ||
|
|
2320a877bc | ||
|
|
ae752ed1fa | ||
|
|
c566cc6b61 | ||
|
|
36e12a6038 | ||
|
|
42531db37e | ||
|
|
6a4ef2e48e | ||
|
|
d2a8be6fc2 | ||
|
|
290ad0996f | ||
|
|
bffc378a4f | ||
|
|
3b8adf7528 | ||
|
|
002ac82631 | ||
|
|
e85df07518 | ||
|
|
589e32a4ed | ||
|
|
74008446fe | ||
|
|
e48005ddc7 | ||
|
|
83066f953c | ||
|
|
90bfa6260a | ||
|
|
8b80eca184 | ||
|
|
fb1374f2f7 | ||
|
|
ff5bf51952 | ||
|
|
61927d228c | ||
|
|
82b9f2c931 | ||
|
|
6a19d7b25a | ||
|
|
ac2e0596bd | ||
|
|
42c821e164 | ||
|
|
20b907d8fb | ||
|
|
f45977d371 | ||
|
|
4ec9b349d0 | ||
|
|
65ac7c5671 | ||
|
|
5c2af3f792 | ||
|
|
8771e83545 | ||
|
|
fa5a1cebd9 | ||
|
|
4f981a0b42 | ||
|
|
127641731a | ||
|
|
c1a17c2561 | ||
|
|
1c5b05c130 | ||
|
|
4155f4e49b | ||
|
|
f3022e891d | ||
|
|
c28405a5c2 |
8
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
8
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: MinIO Community Support
|
||||
url: https://slack.min.io
|
||||
about: Please ask and answer questions here.
|
||||
- name: MinIO SUBNET Support
|
||||
url: https://min.io/pricing
|
||||
about: Join this for Enterprise Support.
|
||||
39
.github/lock.yml
vendored
Normal file
39
.github/lock.yml
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
# Configuration for Lock Threads - https://github.com/dessant/lock-threads-app
|
||||
|
||||
# Number of days of inactivity before a closed issue or pull request is locked
|
||||
daysUntilLock: 365
|
||||
|
||||
# Skip issues and pull requests created before a given timestamp. Timestamp must
|
||||
# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
|
||||
skipCreatedBefore: false
|
||||
|
||||
# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
|
||||
exemptLabels: []
|
||||
|
||||
# Label to add before locking, such as `outdated`. Set to `false` to disable
|
||||
lockLabel: false
|
||||
|
||||
# Comment to post before locking. Set to `false` to disable
|
||||
lockComment: >-
|
||||
|
||||
This thread has been automatically locked since there has not been
|
||||
any recent activity after it was closed. Please open a new issue for
|
||||
related bugs.
|
||||
|
||||
# Assign `resolved` as the reason for locking. Set to `false` to disable
|
||||
setLockReason: true
|
||||
|
||||
# Limit to only `issues` or `pulls`
|
||||
only: issues
|
||||
|
||||
# Optionally, specify configuration settings just for `issues` or `pulls`
|
||||
# issues:
|
||||
# exemptLabels:
|
||||
# - help-wanted
|
||||
# lockLabel: outdated
|
||||
|
||||
# pulls:
|
||||
# daysUntilLock: 30
|
||||
|
||||
# Repository to extend settings from
|
||||
# _extends: repo
|
||||
1
.github/logo.svg
vendored
Normal file
1
.github/logo.svg
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<svg data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 162.612 24.465"><path d="M52.751.414h9.108v23.63h-9.108zM41.711.74l-18.488 9.92a.919.919 0 0 1-.856 0L3.879.74A2.808 2.808 0 0 0 2.558.414h-.023A2.4 2.4 0 0 0 0 2.641v21.376h9.1V13.842a.918.918 0 0 1 1.385-.682l10.361 5.568a3.634 3.634 0 0 0 3.336.028l10.933-5.634a.917.917 0 0 1 1.371.69v10.205h9.1V2.641A2.4 2.4 0 0 0 43.055.414h-.023a2.808 2.808 0 0 0-1.321.326zm65.564-.326h-9.237v10.755a.913.913 0 0 1-1.338.706L72.762.675a2.824 2.824 0 0 0-1.191-.261h-.016a2.4 2.4 0 0 0-2.535 2.227v21.377h9.163V13.275a.914.914 0 0 1 1.337-.707l24.032 11.2a2.813 2.813 0 0 0 1.188.26 2.4 2.4 0 0 0 2.535-2.227zm7.161 23.63V.414h4.191v23.63zm28.856.421c-11.274 0-19.272-4.7-19.272-12.232C124.02 4.741 132.066 0 143.292 0s19.32 4.7 19.32 12.233-7.902 12.232-19.32 12.232zm0-21.333c-8.383 0-14.84 3.217-14.84 9.1 0 5.926 6.457 9.1 14.84 9.1s14.887-3.174 14.887-9.1c0-5.883-6.504-9.1-14.887-9.1z" fill="#c72c48"/></svg>
|
||||
|
After Width: | Height: | Size: 978 B |
59
.github/stale.yml
vendored
Normal file
59
.github/stale.yml
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
# Configuration for probot-stale - https://github.com/probot/stale
|
||||
|
||||
# Number of days of inactivity before an Issue or Pull Request becomes stale
|
||||
daysUntilStale: 90
|
||||
|
||||
# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
|
||||
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
|
||||
daysUntilClose: 30
|
||||
|
||||
# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
|
||||
onlyLabels: []
|
||||
|
||||
# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
|
||||
exemptLabels:
|
||||
- "security"
|
||||
- "pending discussion"
|
||||
|
||||
# Set to true to ignore issues in a project (defaults to false)
|
||||
exemptProjects: false
|
||||
|
||||
# Set to true to ignore issues in a milestone (defaults to false)
|
||||
exemptMilestones: false
|
||||
|
||||
# Set to true to ignore issues with an assignee (defaults to false)
|
||||
exemptAssignees: false
|
||||
|
||||
# Label to use when marking as stale
|
||||
staleLabel: stale
|
||||
|
||||
# Comment to post when marking as stale. Set to `false` to disable
|
||||
markComment: >-
|
||||
This issue has been automatically marked as stale because it has not had
|
||||
recent activity. It will be closed after 21 days if no further activity
|
||||
occurs. Thank you for your contributions.
|
||||
# Comment to post when removing the stale label.
|
||||
# unmarkComment: >
|
||||
# Your comment here.
|
||||
|
||||
# Comment to post when closing a stale Issue or Pull Request.
|
||||
# closeComment: >
|
||||
# Your comment here.
|
||||
|
||||
# Limit the number of actions per hour, from 1-30. Default is 30
|
||||
limitPerRun: 1
|
||||
|
||||
# Limit to only `issues` or `pulls`
|
||||
# only: issues
|
||||
|
||||
# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
|
||||
# pulls:
|
||||
# daysUntilStale: 30
|
||||
# markComment: >
|
||||
# This pull request has been automatically marked as stale because it has not had
|
||||
# recent activity. It will be closed if no further activity occurs. Thank you
|
||||
# for your contributions.
|
||||
|
||||
# issues:
|
||||
# exemptLabels:
|
||||
# - confirmed
|
||||
52
.github/workflows/go.yml
vendored
Normal file
52
.github/workflows/go.yml
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
name: Go
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Test on Go ${{ matrix.go-version }} and ${{ matrix.os }}
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: [1.13.x]
|
||||
os: [ubuntu-latest, windows-latest]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-node@v1
|
||||
with:
|
||||
node-version: '12'
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
- name: Build on ${{ matrix.os }}
|
||||
if: matrix.os == 'windows-latest'
|
||||
env:
|
||||
CGO_ENABLED: 0
|
||||
GO111MODULE: on
|
||||
MINIO_CI_CD: 1
|
||||
run: |
|
||||
go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
|
||||
go test -v --timeout 50m ./...
|
||||
- name: Build on ${{ matrix.os }}
|
||||
if: matrix.os == 'ubuntu-latest'
|
||||
env:
|
||||
CGO_ENABLED: 0
|
||||
GO111MODULE: on
|
||||
MINIO_CI_CD: 1
|
||||
run: |
|
||||
sudo apt-get install devscripts shellcheck
|
||||
make
|
||||
diff -au <(gofmt -s -d cmd) <(printf "")
|
||||
diff -au <(gofmt -s -d pkg) <(printf "")
|
||||
make test-race
|
||||
make crosscompile
|
||||
make verify
|
||||
make verify-healing
|
||||
cd browser && npm install && npm run test && cd ..
|
||||
bash -c 'shopt -s globstar; shellcheck mint/**/*.sh'
|
||||
27
.golangci.yml
Normal file
27
.golangci.yml
Normal file
@@ -0,0 +1,27 @@
|
||||
linters-settings:
|
||||
golint:
|
||||
min-confidence: 0
|
||||
|
||||
misspell:
|
||||
locale: US
|
||||
|
||||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
- typecheck
|
||||
- goimports
|
||||
- misspell
|
||||
- govet
|
||||
- golint
|
||||
- ineffassign
|
||||
- gosimple
|
||||
- deadcode
|
||||
- structcheck
|
||||
|
||||
issues:
|
||||
exclude-use-default: false
|
||||
exclude:
|
||||
- should have a package comment
|
||||
- error strings should not be capitalized or end with punctuation or a newline
|
||||
service:
|
||||
golangci-lint-version: 1.20.0 # use the fixed version to not introduce new linters unexpectedly
|
||||
@@ -1,4 +0,0 @@
|
||||
{
|
||||
"numFilesToCheck": 10,
|
||||
"requiredOrgs": ["minio"]
|
||||
}
|
||||
58
.travis.yml
58
.travis.yml
@@ -1,58 +0,0 @@
|
||||
go_import_path: github.com/minio/minio
|
||||
|
||||
language: go
|
||||
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- shellcheck
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
||||
# this ensures PRs based on a local branch are not built twice
|
||||
# the downside is that a PR targeting a different branch is not built
|
||||
# but as a workaround you can add the branch to this list
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
|
||||
matrix:
|
||||
include:
|
||||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
env:
|
||||
- ARCH=x86_64
|
||||
- CGO_ENABLED=0
|
||||
- GO111MODULE=on
|
||||
go: 1.13.x
|
||||
script:
|
||||
- make
|
||||
- diff -au <(gofmt -s -d cmd) <(printf "")
|
||||
- diff -au <(gofmt -s -d pkg) <(printf "")
|
||||
- for d in $(go list ./... | grep -v browser); do CGO_ENABLED=1 go test -v -race --timeout 15m "$d"; done
|
||||
- make verifiers
|
||||
- make crosscompile
|
||||
- make verify
|
||||
- make coverage
|
||||
- cd browser && yarn && yarn test && cd ..
|
||||
- bash -c 'shopt -s globstar; shellcheck mint/**/*.sh'
|
||||
|
||||
- os: windows
|
||||
env:
|
||||
- ARCH=x86_64
|
||||
- CGO_ENABLED=0
|
||||
- GO111MODULE=on
|
||||
go: 1.13.x
|
||||
script:
|
||||
- go build --ldflags="$(go run buildscripts/gen-ldflags.go)" -o %GOPATH%\bin\minio.exe
|
||||
- bash buildscripts/go-coverage.sh
|
||||
|
||||
before_script:
|
||||
# Add an IPv6 config - see the corresponding Travis issue
|
||||
# https://github.com/travis-ci/travis-ci/issues/8361
|
||||
- if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6'; fi
|
||||
|
||||
before_install:
|
||||
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then nvm install 11.10.1 ; fi
|
||||
@@ -1,4 +1,4 @@
|
||||
# MinIO Contribution Guide [](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/)
|
||||
# MinIO Contribution Guide [](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/)
|
||||
|
||||
``MinIO`` community welcomes your contribution. To make the process as seamless as possible, we recommend you read this contribution guide.
|
||||
|
||||
|
||||
@@ -5,24 +5,25 @@ LABEL maintainer="MinIO Inc <dev@min.io>"
|
||||
ENV GOPATH /go
|
||||
ENV CGO_ENABLED 0
|
||||
ENV GO111MODULE on
|
||||
ENV GOPROXY https://proxy.golang.org
|
||||
|
||||
RUN \
|
||||
apk add --no-cache git && \
|
||||
git clone https://github.com/minio/minio && cd minio && \
|
||||
go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"
|
||||
|
||||
FROM alpine:3.9
|
||||
FROM alpine:3.10
|
||||
|
||||
ENV MINIO_UPDATE off
|
||||
ENV MINIO_ACCESS_KEY_FILE=access_key \
|
||||
MINIO_SECRET_KEY_FILE=secret_key \
|
||||
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
|
||||
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
|
||||
|
||||
EXPOSE 9000
|
||||
|
||||
COPY --from=0 /go/bin/minio /usr/bin/minio
|
||||
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
|
||||
COPY --from=0 /go/minio/CREDITS /third_party/
|
||||
COPY --from=0 /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/
|
||||
|
||||
RUN \
|
||||
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
|
||||
|
||||
41
Dockerfile.arm.release
Normal file
41
Dockerfile.arm.release
Normal file
@@ -0,0 +1,41 @@
|
||||
FROM golang:1.13-alpine as builder
|
||||
|
||||
WORKDIR /home
|
||||
|
||||
ENV GOPATH /go
|
||||
ENV CGO_ENABLED 0
|
||||
ENV GO111MODULE on
|
||||
|
||||
RUN \
|
||||
apk add --no-cache git 'curl>7.61.0' && \
|
||||
git clone https://github.com/minio/minio && \
|
||||
curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
|
||||
|
||||
FROM arm32v7/alpine:3.10
|
||||
|
||||
LABEL maintainer="MinIO Inc <dev@min.io>"
|
||||
|
||||
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
|
||||
COPY CREDITS /third_party/
|
||||
COPY --from=builder /home/qemu-arm-static /usr/bin/qemu-arm-static
|
||||
|
||||
ENV MINIO_UPDATE off
|
||||
ENV MINIO_ACCESS_KEY_FILE=access_key \
|
||||
MINIO_SECRET_KEY_FILE=secret_key \
|
||||
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
|
||||
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
|
||||
|
||||
RUN \
|
||||
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
|
||||
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
|
||||
curl https://dl.min.io/server/minio/release/linux-arm/minio > /usr/bin/minio && \
|
||||
chmod +x /usr/bin/minio && \
|
||||
chmod +x /usr/bin/docker-entrypoint.sh
|
||||
|
||||
EXPOSE 9000
|
||||
|
||||
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
|
||||
|
||||
VOLUME ["/data"]
|
||||
|
||||
CMD ["minio"]
|
||||
41
Dockerfile.arm64.release
Normal file
41
Dockerfile.arm64.release
Normal file
@@ -0,0 +1,41 @@
|
||||
FROM golang:1.13-alpine as builder
|
||||
|
||||
WORKDIR /home
|
||||
|
||||
ENV GOPATH /go
|
||||
ENV CGO_ENABLED 0
|
||||
ENV GO111MODULE on
|
||||
|
||||
RUN \
|
||||
apk add --no-cache git 'curl>7.61.0' && \
|
||||
git clone https://github.com/minio/minio && \
|
||||
curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
|
||||
|
||||
FROM arm64v8/alpine:3.10
|
||||
|
||||
LABEL maintainer="MinIO Inc <dev@min.io>"
|
||||
|
||||
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
|
||||
COPY CREDITS /third_party/
|
||||
COPY --from=builder /home/qemu-arm-static /usr/bin/qemu-arm-static
|
||||
|
||||
ENV MINIO_UPDATE off
|
||||
ENV MINIO_ACCESS_KEY_FILE=access_key \
|
||||
MINIO_SECRET_KEY_FILE=secret_key \
|
||||
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
|
||||
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
|
||||
|
||||
RUN \
|
||||
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
|
||||
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
|
||||
curl https://dl.min.io/server/minio/release/linux-arm64/minio > /usr/bin/minio && \
|
||||
chmod +x /usr/bin/minio && \
|
||||
chmod +x /usr/bin/docker-entrypoint.sh
|
||||
|
||||
EXPOSE 9000
|
||||
|
||||
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
|
||||
|
||||
VOLUME ["/data"]
|
||||
|
||||
CMD ["minio"]
|
||||
@@ -1,13 +1,15 @@
|
||||
FROM alpine:3.9
|
||||
FROM alpine:3.10
|
||||
|
||||
LABEL maintainer="MinIO Inc <dev@min.io>"
|
||||
|
||||
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
|
||||
COPY minio /usr/bin/
|
||||
COPY CREDITS /third_party/
|
||||
|
||||
ENV MINIO_UPDATE off
|
||||
ENV MINIO_ACCESS_KEY_FILE=access_key \
|
||||
MINIO_SECRET_KEY_FILE=secret_key \
|
||||
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
|
||||
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
|
||||
|
||||
RUN \
|
||||
|
||||
13
Dockerfile.dev.browser
Normal file
13
Dockerfile.dev.browser
Normal file
@@ -0,0 +1,13 @@
|
||||
FROM ubuntu
|
||||
|
||||
LABEL maintainer="MinIO Inc <dev@min.io>"
|
||||
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends --no-install-suggests \
|
||||
git golang make npm && \
|
||||
apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ENV PATH=$PATH:/root/go/bin
|
||||
|
||||
RUN go get github.com/go-bindata/go-bindata/go-bindata && \
|
||||
go get github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs
|
||||
|
||||
@@ -1,17 +1,11 @@
|
||||
FROM ubuntu:16.04
|
||||
FROM ubuntu:18.04
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
||||
ENV LANG C.UTF-8
|
||||
|
||||
ENV GOROOT /usr/local/go
|
||||
|
||||
ENV GOPATH /usr/local
|
||||
|
||||
ENV GOPATH /usr/local/gopath
|
||||
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH
|
||||
|
||||
ENV MINT_ROOT_DIR /mint
|
||||
|
||||
COPY mint /mint
|
||||
|
||||
RUN apt-get --yes update && apt-get --yes upgrade && \
|
||||
|
||||
@@ -8,15 +8,17 @@ RUN \
|
||||
apk add --no-cache git && \
|
||||
git clone https://github.com/minio/minio
|
||||
|
||||
FROM alpine:3.9
|
||||
FROM alpine:3.10
|
||||
|
||||
LABEL maintainer="MinIO Inc <dev@min.io>"
|
||||
|
||||
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
|
||||
COPY CREDITS /third_party/
|
||||
|
||||
ENV MINIO_UPDATE off
|
||||
ENV MINIO_ACCESS_KEY_FILE=access_key \
|
||||
MINIO_SECRET_KEY_FILE=secret_key \
|
||||
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
|
||||
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
|
||||
|
||||
RUN \
|
||||
|
||||
@@ -1,74 +0,0 @@
|
||||
#-------------------------------------------------------------
|
||||
# Stage 1: Build and Unit tests
|
||||
#-------------------------------------------------------------
|
||||
FROM golang:1.13
|
||||
|
||||
COPY . /go/src/github.com/minio/minio
|
||||
WORKDIR /go/src/github.com/minio/minio
|
||||
|
||||
RUN apt-get update && apt-get install -y jq
|
||||
ENV GO111MODULE=on
|
||||
|
||||
RUN git config --global http.cookiefile /gitcookie/.gitcookie
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get -y install sudo
|
||||
RUN touch /etc/sudoers
|
||||
|
||||
RUN echo "root ALL=(ALL) ALL" >> /etc/sudoers
|
||||
RUN echo "ci ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
|
||||
RUN echo "Defaults env_reset" >> /etc/sudoers
|
||||
RUN echo 'Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go:/usr/local/go/bin"' >> /etc/sudoers
|
||||
|
||||
RUN mkdir -p /home/ci/.cache
|
||||
|
||||
RUN groupadd -g 999 ci && \
|
||||
useradd -r -u 999 -g ci ci && \
|
||||
chown -R ci:ci /go /home/ci && \
|
||||
chmod -R a+rw /go
|
||||
|
||||
USER ci
|
||||
|
||||
# -- tests --
|
||||
RUN make
|
||||
RUN bash -c 'diff -au <(gofmt -s -d cmd) <(printf "")'
|
||||
RUN bash -c 'diff -au <(gofmt -s -d pkg) <(printf "")'
|
||||
RUN for d in $(go list ./... | grep -v browser); do go test -v -race --timeout 20m "$d"; done
|
||||
RUN make verifiers
|
||||
RUN make crosscompile
|
||||
RUN make verify
|
||||
|
||||
#-------------------------------------------------------------
|
||||
# Stage 2: Test Frontend
|
||||
#-------------------------------------------------------------
|
||||
FROM node:10.15-stretch-slim
|
||||
|
||||
COPY browser /minio/browser
|
||||
WORKDIR /minio/browser
|
||||
|
||||
RUN yarn
|
||||
RUN yarn test
|
||||
|
||||
#-------------------------------------------------------------
|
||||
# Stage 3: Run Gateway Tests
|
||||
#-------------------------------------------------------------
|
||||
FROM ubuntu:16.04
|
||||
|
||||
COPY --from=0 /go/src/github.com/minio/minio/minio /usr/bin/minio
|
||||
COPY buildscripts/gateway-tests.sh /usr/bin/gateway-tests.sh
|
||||
COPY mint /mint
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
ENV LANG C.UTF-8
|
||||
ENV GOROOT /usr/local/go
|
||||
ENV GOPATH /usr/local
|
||||
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH
|
||||
ENV MINT_ROOT_DIR /mint
|
||||
|
||||
RUN apt-get --yes update && apt-get --yes upgrade && \
|
||||
apt-get --yes --quiet install wget jq curl git dnsmasq && \
|
||||
cd /mint && /mint/release.sh
|
||||
|
||||
WORKDIR /mint
|
||||
|
||||
RUN /usr/bin/gateway-tests.sh
|
||||
52
Makefile
52
Makefile
@@ -18,22 +18,12 @@ checks:
|
||||
|
||||
getdeps:
|
||||
@mkdir -p ${GOPATH}/bin
|
||||
@which golint 1>/dev/null || (echo "Installing golint" && GO111MODULE=off go get -u golang.org/x/lint/golint)
|
||||
ifeq ($(GOARCH),s390x)
|
||||
@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && GO111MODULE=off go get honnef.co/go/tools/cmd/staticcheck)
|
||||
else
|
||||
@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && wget --quiet https://github.com/dominikh/go-tools/releases/download/2019.2.3/staticcheck_${GOOS}_${GOARCH}.tar.gz && tar xf staticcheck_${GOOS}_${GOARCH}.tar.gz && mv staticcheck/staticcheck ${GOPATH}/bin/staticcheck && chmod +x ${GOPATH}/bin/staticcheck && rm -f staticcheck_${GOOS}_${GOARCH}.tar.gz && rm -rf staticcheck)
|
||||
endif
|
||||
@which misspell 1>/dev/null || (echo "Installing misspell" && GO111MODULE=off go get -u github.com/client9/misspell/cmd/misspell)
|
||||
@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
|
||||
|
||||
crosscompile:
|
||||
@(env bash $(PWD)/buildscripts/cross-compile.sh)
|
||||
|
||||
verifiers: getdeps vet fmt lint staticcheck spelling
|
||||
|
||||
vet:
|
||||
@echo "Running $@ check"
|
||||
@GO111MODULE=on go vet github.com/minio/minio/...
|
||||
verifiers: getdeps fmt lint
|
||||
|
||||
fmt:
|
||||
@echo "Running $@ check"
|
||||
@@ -42,21 +32,8 @@ fmt:
|
||||
|
||||
lint:
|
||||
@echo "Running $@ check"
|
||||
@GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd/...
|
||||
@GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg/...
|
||||
|
||||
staticcheck:
|
||||
@echo "Running $@ check"
|
||||
@GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/cmd/...
|
||||
@GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/pkg/...
|
||||
|
||||
spelling:
|
||||
@echo "Running $@ check"
|
||||
@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find cmd/`
|
||||
@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find pkg/`
|
||||
@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find docs/`
|
||||
@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find buildscripts/`
|
||||
@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find dockerscripts/`
|
||||
@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
|
||||
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
|
||||
|
||||
# Builds minio, runs the verifiers then runs the tests.
|
||||
check: test
|
||||
@@ -64,18 +41,26 @@ test: verifiers build
|
||||
@echo "Running unit tests"
|
||||
@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
|
||||
|
||||
verify: build
|
||||
@echo "Verifying build"
|
||||
test-race: verifiers build
|
||||
@echo "Running unit tests under -race"
|
||||
@(env bash $(PWD)/buildscripts/race.sh)
|
||||
|
||||
# Verify minio binary
|
||||
verify:
|
||||
@echo "Verifying build with race"
|
||||
@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
|
||||
@(env bash $(PWD)/buildscripts/verify-build.sh)
|
||||
|
||||
coverage: build
|
||||
@echo "Running all coverage for minio"
|
||||
@(env bash $(PWD)/buildscripts/go-coverage.sh)
|
||||
# Verify healing of disks with minio binary
|
||||
verify-healing:
|
||||
@echo "Verify healing build with race"
|
||||
@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
|
||||
@(env bash $(PWD)/buildscripts/verify-healing.sh)
|
||||
|
||||
# Builds minio locally.
|
||||
build: checks
|
||||
@echo "Building minio binary to './minio'"
|
||||
@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
|
||||
@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
|
||||
|
||||
docker: build
|
||||
@docker build -t $(TAG) . -f Dockerfile.dev
|
||||
@@ -93,3 +78,4 @@ clean:
|
||||
@rm -rvf minio
|
||||
@rm -rvf build
|
||||
@rm -rvf release
|
||||
@rm -rvf .verify*
|
||||
|
||||
4
NOTICE
4
NOTICE
@@ -1,4 +1,4 @@
|
||||
MinIO Cloud Storage, (C) 2014,2015 MinIO, Inc.
|
||||
MinIO Cloud Storage, (C) 2014-2020 MinIO, Inc.
|
||||
|
||||
This product includes software developed at MinIO, Inc.
|
||||
(https://min.io/).
|
||||
@@ -6,4 +6,4 @@ This product includes software developed at MinIO, Inc.
|
||||
The MinIO project contains unmodified/modified subcomponents too with
|
||||
separate copyright notices and license terms. Your use of the source
|
||||
code for these subcomponents is subject to the terms and conditions
|
||||
of the following licenses.
|
||||
of Apache License Version 2.0
|
||||
|
||||
38
README.md
38
README.md
@@ -1,14 +1,9 @@
|
||||
# MinIO Quickstart Guide
|
||||
[](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/)
|
||||
[](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/)
|
||||
|
||||
MinIO is an object storage server released under Apache License v2.0. It is compatible[1] with Amazon S3 cloud storage service. It is best suited for storing unstructured data such as photos, videos, log files, backups and container / VM images. Size of an object can range from a few KBs to a maximum of 5TB.
|
||||
[](https://min.io)
|
||||
|
||||
MinIO server is light enough to be bundled with the application stack, similar to NodeJS, Redis and MySQL.
|
||||
|
||||
[1] MinIO in its default mode is faster and does not calculate MD5Sum unless passed by client. This may lead to incompatibility with some S3 clients like s3ql that heavily depend on MD5Sum. For such applications start MinIO with `--compat` option.
|
||||
```sh
|
||||
minio --compat server /data
|
||||
```
|
||||
MinIO is a High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
|
||||
|
||||
## Docker Container
|
||||
### Stable
|
||||
@@ -22,11 +17,12 @@ docker run -p 9000:9000 minio/minio server /data
|
||||
docker pull minio/minio:edge
|
||||
docker run -p 9000:9000 minio/minio:edge server /data
|
||||
```
|
||||
Note: Docker will not display the autogenerated keys unless you start the container with the `-it`(interactive TTY) argument. Generally, it is not recommended to use autogenerated keys with containers. Please visit MinIO Docker quickstart guide for more information [here](https://docs.min.io/docs/minio-docker-quickstart-guide)
|
||||
|
||||
> NOTE: Docker will not display the default keys unless you start the container with the `-it`(interactive TTY) argument. Generally, it is not recommended to use default keys with containers. Please visit MinIO Docker quickstart guide for more information [here](https://docs.min.io/docs/minio-docker-quickstart-guide)
|
||||
|
||||
## macOS
|
||||
### Homebrew
|
||||
Install minio packages using [Homebrew](http://brew.sh/)
|
||||
### Homebrew (recommended)
|
||||
Install minio packages using [Homebrew](https://brew.sh/)
|
||||
```sh
|
||||
brew install minio/stable/minio
|
||||
minio server /data
|
||||
@@ -39,8 +35,8 @@ brew install minio/stable/minio
|
||||
```
|
||||
|
||||
### Binary Download
|
||||
| Platform | Architecture | URL |
|
||||
| ---------- | -------- | ------ |
|
||||
| Platform | Architecture | URL |
|
||||
| ---------- | -------- | ------ |
|
||||
| Apple macOS | 64-bit Intel | https://dl.min.io/server/minio/release/darwin-amd64/minio |
|
||||
```sh
|
||||
chmod 755 minio
|
||||
@@ -49,8 +45,8 @@ chmod 755 minio
|
||||
|
||||
## GNU/Linux
|
||||
### Binary Download
|
||||
| Platform | Architecture | URL |
|
||||
| ---------- | -------- | ------ |
|
||||
| Platform | Architecture | URL |
|
||||
| ---------- | -------- | ------ |
|
||||
| GNU/Linux | 64-bit Intel | https://dl.min.io/server/minio/release/linux-amd64/minio |
|
||||
```sh
|
||||
wget https://dl.min.io/server/minio/release/linux-amd64/minio
|
||||
@@ -58,8 +54,8 @@ chmod +x minio
|
||||
./minio server /data
|
||||
```
|
||||
|
||||
| Platform | Architecture | URL |
|
||||
| ---------- | -------- | ------ |
|
||||
| Platform | Architecture | URL |
|
||||
| ---------- | -------- | ------ |
|
||||
| GNU/Linux | ppc64le | https://dl.min.io/server/minio/release/linux-ppc64le/minio |
|
||||
```sh
|
||||
wget https://dl.min.io/server/minio/release/linux-ppc64le/minio
|
||||
@@ -69,8 +65,8 @@ chmod +x minio
|
||||
|
||||
## Microsoft Windows
|
||||
### Binary Download
|
||||
| Platform | Architecture | URL |
|
||||
| ---------- | -------- | ------ |
|
||||
| Platform | Architecture | URL |
|
||||
| ---------- | -------- | ------ |
|
||||
| Microsoft Windows | 64-bit | https://dl.min.io/server/minio/release/windows-amd64/minio.exe |
|
||||
```sh
|
||||
minio.exe server D:\Photos
|
||||
@@ -78,7 +74,7 @@ minio.exe server D:\Photos
|
||||
|
||||
## FreeBSD
|
||||
### Port
|
||||
Install minio packages using [pkg](https://github.com/freebsd/pkg)
|
||||
Install minio packages using [pkg](https://github.com/freebsd/pkg), MinIO doesn't officially build FreeBSD binaries but is maintained by FreeBSD upstream [here](https://www.freshports.org/www/minio).
|
||||
|
||||
```sh
|
||||
pkg install minio
|
||||
@@ -88,7 +84,7 @@ service minio start
|
||||
```
|
||||
|
||||
## Install from Source
|
||||
Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.12](https://golang.org/dl/#stable)
|
||||
Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.13](https://golang.org/dl/#stable)
|
||||
|
||||
```sh
|
||||
GO111MODULE=on go get github.com/minio/minio
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# MinIO Quickstart Guide [](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/)
|
||||
# MinIO Quickstart Guide [](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/)
|
||||
|
||||
MinIO 是一个基于Apache License v2.0开源协议的对象存储服务。它兼容亚马逊S3云存储服务接口,非常适合于存储大容量非结构化的数据,例如图片、视频、日志文件、备份数据和容器/虚拟机镜像等,而一个对象文件可以是任意大小,从几kb到最大5T不等。
|
||||
|
||||
|
||||
22
SECURITY.md
22
SECURITY.md
@@ -8,12 +8,12 @@ Whenever there is a security update you just need to upgrade to the latest versi
|
||||
## Reporting a Vulnerability
|
||||
|
||||
All security bugs in [minio/minio](https://github,com/minio/minio) (or other minio/* repositories)
|
||||
should be reported by email to security@min.io. Your email will be acknowledged within 48 hours,
|
||||
and you'll receive a more detailed response to your email within 72 hours indicating the next steps
|
||||
in handling your report.
|
||||
should be reported by email to security@min.io. Your email will be acknowledged within 48 hours,
|
||||
and you'll receive a more detailed response to your email within 72 hours indicating the next steps
|
||||
in handling your report.
|
||||
|
||||
Please, provide a detailed explanation of the issue. In particular, outline the type of the security
|
||||
issue (DoS, authentication bypass, information disclouse, ...) and the assumptions you're making (e.g. do
|
||||
Please, provide a detailed explanation of the issue. In particular, outline the type of the security
|
||||
issue (DoS, authentication bypass, information disclose, ...) and the assumptions you're making (e.g. do
|
||||
you need access credentials for a successful exploit).
|
||||
|
||||
If you have not received a reply to your email within 48 hours or you have not heard from the security team
|
||||
@@ -28,14 +28,14 @@ MinIO uses the following disclosure process:
|
||||
|
||||
1. Once the security report is received one member of the security team tries to verify and reproduce
|
||||
the issue and determines the impact it has.
|
||||
2. A member of the security team will respond and either confrim or reject the security report.
|
||||
If the report is rejected the repsonse explains why.
|
||||
2. A member of the security team will respond and either confirm or reject the security report.
|
||||
If the report is rejected the response explains why.
|
||||
3. Code is audited to find any potential similar problems.
|
||||
4. Fixes are prepared for the latest release.
|
||||
4. Fixes are prepared for the latest release.
|
||||
5. On the date that the fixes are applied a security advisory will be published on https://blog.min.io.
|
||||
Please inform us in your report email whether MinIO should mention your contribution w.r.t. fixing
|
||||
the security issue. By default MinIO will **not** publish this information to protect your privacy.
|
||||
|
||||
|
||||
This process can take some time, especially when coordination is required with maintainers of other projects.
|
||||
Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we
|
||||
follow the process described above to ensure that disclosures are handled consistently.
|
||||
Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we
|
||||
follow the process described above to ensure that disclosures are handled consistently.
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
|
||||
``MinIO Browser`` provides minimal set of UI to manage buckets and objects on ``minio`` server. ``MinIO Browser`` is written in javascript and released under [Apache 2.0 License](./LICENSE).
|
||||
|
||||
|
||||
## Installation
|
||||
|
||||
### Install node
|
||||
@@ -11,6 +12,11 @@ exec -l $SHELL
|
||||
nvm install stable
|
||||
```
|
||||
|
||||
### Install node dependencies
|
||||
```sh
|
||||
npm install
|
||||
```
|
||||
|
||||
### Install `go-bindata` and `go-bindata-assetfs`
|
||||
|
||||
If you do not have a working Golang environment, please follow [Install Golang](https://golang.org/doc/install)
|
||||
@@ -30,13 +36,16 @@ npm run release
|
||||
|
||||
This generates ui-assets.go in the current directory. Now do `make` in the parent directory to build the minio binary with the newly generated ``ui-assets.go``
|
||||
|
||||
|
||||
## Run MinIO Browser with live reload
|
||||
|
||||
### Run MinIO Browser with live reload
|
||||
|
||||
```sh
|
||||
npm run dev
|
||||
```
|
||||
|
||||
Open [http://localhost:8080/minio/](http://localhost:8080/minio/) in your browser to play with the application
|
||||
Open [http://localhost:8080/minio/](http://localhost:8080/minio/) in your browser to play with the application.
|
||||
|
||||
### Run MinIO Browser with live reload on custom port
|
||||
|
||||
@@ -54,7 +63,7 @@ index 3ccdaba..9496c56 100644
|
||||
+ port: 8888,
|
||||
proxy: {
|
||||
'/minio/webrpc': {
|
||||
target: 'http://localhost:9000',
|
||||
target: 'http://localhost:9000',
|
||||
@@ -97,7 +98,7 @@ var exports = {
|
||||
if (process.env.NODE_ENV === 'dev') {
|
||||
exports.entry = [
|
||||
@@ -70,4 +79,102 @@ index 3ccdaba..9496c56 100644
|
||||
npm run dev
|
||||
```
|
||||
|
||||
Open [http://localhost:8888/minio/](http://localhost:8888/minio/) in your browser to play with the application
|
||||
Open [http://localhost:8888/minio/](http://localhost:8888/minio/) in your browser to play with the application.
|
||||
|
||||
### Run MinIO Browser with live reload on any IP
|
||||
|
||||
Edit `browser/webpack.config.js`
|
||||
|
||||
```diff
|
||||
diff --git a/browser/webpack.config.js b/browser/webpack.config.js
|
||||
index 8bdbba53..139f6049 100644
|
||||
--- a/browser/webpack.config.js
|
||||
+++ b/browser/webpack.config.js
|
||||
@@ -71,6 +71,7 @@ var exports = {
|
||||
historyApiFallback: {
|
||||
index: '/minio/'
|
||||
},
|
||||
+ host: '0.0.0.0',
|
||||
proxy: {
|
||||
'/minio/webrpc': {
|
||||
target: 'http://localhost:9000',
|
||||
```
|
||||
|
||||
```sh
|
||||
npm run dev
|
||||
```
|
||||
|
||||
Open [http://IP:8080/minio/](http://IP:8080/minio/) in your browser to play with the application.
|
||||
|
||||
|
||||
## Run tests
|
||||
|
||||
npm run test
|
||||
|
||||
|
||||
## Docker development environment
|
||||
|
||||
This approach will download the sources on your machine such that you are able to use your IDE or editor of choice.
|
||||
A Docker container will be used in order to provide a controlled build environment without messing with your host system.
|
||||
|
||||
### Prepare host system
|
||||
|
||||
Install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) and [Docker](https://docs.docker.com/get-docker/).
|
||||
|
||||
### Development within container
|
||||
|
||||
Prepare and build container
|
||||
```
|
||||
git clone git@github.com:minio/minio.git
|
||||
cd minio
|
||||
docker build -t minio-dev -f Dockerfile.dev.browser .
|
||||
```
|
||||
|
||||
Run container, build and run core
|
||||
```sh
|
||||
docker run -it --rm --name minio-dev -v "$PWD":/minio minio-dev
|
||||
|
||||
cd /minio/browser
|
||||
npm install
|
||||
npm run release
|
||||
cd /minio
|
||||
make
|
||||
./minio server /data
|
||||
```
|
||||
Note `Endpoint` IP (the one which is _not_ `127.0.0.1`), `AccessKey` and `SecretKey` (both default to `minioadmin`) in order to enter them in the browser later.
|
||||
|
||||
|
||||
Open another terminal.
|
||||
Connect to container
|
||||
```sh
|
||||
docker exec -it minio-dev bash
|
||||
```
|
||||
|
||||
Apply patch to allow access from outside container
|
||||
```sh
|
||||
cd /minio
|
||||
git apply --ignore-whitespace <<EOF
|
||||
diff --git a/browser/webpack.config.js b/browser/webpack.config.js
|
||||
index 8bdbba53..139f6049 100644
|
||||
--- a/browser/webpack.config.js
|
||||
+++ b/browser/webpack.config.js
|
||||
@@ -71,6 +71,7 @@ var exports = {
|
||||
historyApiFallback: {
|
||||
index: '/minio/'
|
||||
},
|
||||
+ host: '0.0.0.0',
|
||||
proxy: {
|
||||
'/minio/webrpc': {
|
||||
target: 'http://localhost:9000',
|
||||
EOF
|
||||
```
|
||||
|
||||
Build and run frontend with auto-reload
|
||||
```sh
|
||||
cd /minio/browser
|
||||
npm install
|
||||
npm run dev
|
||||
```
|
||||
|
||||
Open [http://IP:8080/minio/](http://IP:8080/minio/) in your browser to play with the application.
|
||||
|
||||
|
||||
@@ -18,11 +18,13 @@ import React from "react"
|
||||
import { Route, Switch, Redirect } from "react-router-dom"
|
||||
import Browser from "./browser/Browser"
|
||||
import Login from "./browser/Login"
|
||||
import OpenIDLogin from "./browser/OpenIDLogin"
|
||||
import web from "./web"
|
||||
|
||||
export const App = () => {
|
||||
return (
|
||||
<Switch>
|
||||
<Route path={"/login/openid"} component={OpenIDLogin} />
|
||||
<Route path={"/login"} component={Login} />
|
||||
<Route path={"/:bucket?/*"} component={Browser} />
|
||||
</Switch>
|
||||
|
||||
@@ -19,7 +19,7 @@ import { Modal } from "react-bootstrap"
|
||||
import logo from "../../img/logo.svg"
|
||||
|
||||
export const AboutModal = ({ serverInfo, hideAbout }) => {
|
||||
const { version, memory, platform, runtime } = serverInfo
|
||||
const { version, platform, runtime } = serverInfo
|
||||
return (
|
||||
<Modal
|
||||
className="modal-about modal-dark"
|
||||
@@ -42,10 +42,6 @@ export const AboutModal = ({ serverInfo, hideAbout }) => {
|
||||
<div>Version</div>
|
||||
<small>{version}</small>
|
||||
</li>
|
||||
<li>
|
||||
<div>Memory</div>
|
||||
<small>{memory}</small>
|
||||
</li>
|
||||
<li>
|
||||
<div>Platform</div>
|
||||
<small>{platform}</small>
|
||||
|
||||
@@ -80,13 +80,6 @@ export class ChangePasswordModal extends React.Component {
|
||||
|
||||
generateAuth(e) {
|
||||
const { serverInfo } = this.props
|
||||
// Generate random access key only for root user
|
||||
if (!serverInfo.userInfo.isIAMUser) {
|
||||
this.setState({
|
||||
newAccessKey: getRandomAccessKey()
|
||||
})
|
||||
}
|
||||
|
||||
this.setState({
|
||||
newSecretKey: getRandomSecretKey(),
|
||||
newSecretKeyVisible: true
|
||||
@@ -95,15 +88,16 @@ export class ChangePasswordModal extends React.Component {
|
||||
|
||||
canChangePassword() {
|
||||
const { serverInfo } = this.props
|
||||
// Password change is not allowed in WORM mode
|
||||
if (serverInfo.info.isWorm) {
|
||||
// Password change is not allowed for temporary users(STS)
|
||||
if(serverInfo.userInfo.isTempUser) {
|
||||
return false
|
||||
}
|
||||
|
||||
// When credentials are set on ENV, password change not allowed for owner
|
||||
if (serverInfo.info.isEnvCreds && !serverInfo.userInfo.isIAMUser) {
|
||||
// Password change is only allowed for regular users
|
||||
if (!serverInfo.userInfo.isIAMUser) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -186,24 +180,6 @@ export class ChangePasswordModal extends React.Component {
|
||||
</div>
|
||||
|
||||
<div className="has-toggle-password m-t-30">
|
||||
{!serverInfo.userInfo.isIAMUser && (
|
||||
<InputGroup
|
||||
value={this.state.newAccessKey}
|
||||
id="newAccessKey"
|
||||
label={"New Access Key"}
|
||||
name="newAccesskey"
|
||||
type="text"
|
||||
spellCheck="false"
|
||||
required="required"
|
||||
autoComplete="false"
|
||||
align="ig-left"
|
||||
onChange={e => {
|
||||
this.setState({ newAccessKey: e.target.value })
|
||||
}}
|
||||
readonly={serverInfo.userInfo.isIAMUser}
|
||||
/>
|
||||
)}
|
||||
|
||||
<i
|
||||
onClick={() => {
|
||||
this.setState({
|
||||
|
||||
@@ -16,20 +16,22 @@
|
||||
|
||||
import React from "react"
|
||||
import { connect } from "react-redux"
|
||||
import classNames from "classnames"
|
||||
import logo from "../../img/logo.svg"
|
||||
import Alert from "../alert/Alert"
|
||||
import * as actionsAlert from "../alert/actions"
|
||||
import InputGroup from "./InputGroup"
|
||||
import web from "../web"
|
||||
import { Redirect } from "react-router-dom"
|
||||
import { Redirect, Link } from "react-router-dom"
|
||||
import OpenIDLoginButton from './OpenIDLoginButton'
|
||||
|
||||
export class Login extends React.Component {
|
||||
constructor(props) {
|
||||
super(props)
|
||||
this.state = {
|
||||
accessKey: "",
|
||||
secretKey: ""
|
||||
secretKey: "",
|
||||
discoveryDoc: {},
|
||||
clientId: ""
|
||||
}
|
||||
}
|
||||
|
||||
@@ -83,6 +85,15 @@ export class Login extends React.Component {
|
||||
document.body.classList.add("is-guest")
|
||||
}
|
||||
|
||||
componentDidMount() {
|
||||
web.GetDiscoveryDoc().then(({ DiscoveryDoc, clientId }) => {
|
||||
this.setState({
|
||||
clientId,
|
||||
discoveryDoc: DiscoveryDoc
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
componentWillUnmount() {
|
||||
document.body.classList.remove("is-guest")
|
||||
}
|
||||
@@ -95,6 +106,8 @@ export class Login extends React.Component {
|
||||
let alertBox = <Alert {...alert} onDismiss={clearAlert} />
|
||||
// Make sure you don't show a fading out alert box on the initial web-page load.
|
||||
if (!alert.message) alertBox = ""
|
||||
|
||||
const showOpenID = Boolean(this.state.discoveryDoc && this.state.discoveryDoc.authorization_endpoint)
|
||||
return (
|
||||
<div className="login">
|
||||
{alertBox}
|
||||
@@ -122,12 +135,32 @@ export class Login extends React.Component {
|
||||
type="password"
|
||||
spellCheck="false"
|
||||
required="required"
|
||||
autoComplete="new-password"
|
||||
/>
|
||||
<button className="lw-btn" type="submit">
|
||||
<i className="fas fa-sign-in-alt" />
|
||||
</button>
|
||||
</form>
|
||||
{showOpenID && (
|
||||
<div className="openid-login">
|
||||
<div className="or">or</div>
|
||||
{
|
||||
this.state.clientId ? (
|
||||
<OpenIDLoginButton
|
||||
className="btn openid-btn"
|
||||
clientId={this.state.clientId}
|
||||
authEp={this.state.discoveryDoc.authorization_endpoint}
|
||||
authScopes={this.state.discoveryDoc.scopes_supported}
|
||||
>
|
||||
Log in with OpenID
|
||||
</OpenIDLoginButton>
|
||||
) : (
|
||||
<Link to={"/login/openid"} className="btn openid-btn">
|
||||
Log in with OpenID
|
||||
</Link>
|
||||
)
|
||||
}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
<div className="l-footer">
|
||||
<a className="lf-logo" href="">
|
||||
|
||||
169
browser/app/js/browser/OpenIDLogin.js
Normal file
169
browser/app/js/browser/OpenIDLogin.js
Normal file
@@ -0,0 +1,169 @@
|
||||
/*
|
||||
* MinIO Cloud Storage (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import React from "react"
|
||||
import { connect } from "react-redux"
|
||||
import logo from "../../img/logo.svg"
|
||||
import Alert from "../alert/Alert"
|
||||
import * as actionsAlert from "../alert/actions"
|
||||
import InputGroup from "./InputGroup"
|
||||
import web from "../web"
|
||||
import { Redirect } from "react-router-dom"
|
||||
import qs from "query-string"
|
||||
import { getRandomString } from "../utils"
|
||||
import storage from "local-storage-fallback"
|
||||
import jwtDecode from "jwt-decode"
|
||||
import { buildOpenIDAuthURL, OPEN_ID_NONCE_KEY } from './utils'
|
||||
|
||||
export class OpenIDLogin extends React.Component {
|
||||
constructor(props) {
|
||||
super(props)
|
||||
this.state = {
|
||||
clientID: "",
|
||||
discoveryDoc: {}
|
||||
}
|
||||
this.clientIDChange = this.clientIDChange.bind(this)
|
||||
this.handleSubmit = this.handleSubmit.bind(this)
|
||||
}
|
||||
|
||||
clientIDChange(e) {
|
||||
this.setState({
|
||||
clientID: e.target.value
|
||||
})
|
||||
}
|
||||
|
||||
handleSubmit(event) {
|
||||
event.preventDefault()
|
||||
const { showAlert } = this.props
|
||||
let message = ""
|
||||
if (this.state.clientID === "") {
|
||||
message = "Client ID cannot be empty"
|
||||
}
|
||||
if (message) {
|
||||
showAlert("danger", message)
|
||||
return
|
||||
}
|
||||
|
||||
if (this.state.discoveryDoc && this.state.discoveryDoc.authorization_endpoint) {
|
||||
const redirectURI = window.location.href.split("#")[0]
|
||||
|
||||
// Store nonce in localstorage to check again after the redirect
|
||||
const nonce = getRandomString(16)
|
||||
storage.setItem(OPEN_ID_NONCE_KEY, nonce)
|
||||
|
||||
const authURL = buildOpenIDAuthURL(
|
||||
this.state.discoveryDoc.authorization_endpoint,
|
||||
this.state.discoveryDoc.scopes_supported,
|
||||
redirectURI,
|
||||
this.state.clientID,
|
||||
nonce
|
||||
)
|
||||
window.location = authURL
|
||||
}
|
||||
}
|
||||
|
||||
componentWillMount() {
|
||||
const { clearAlert } = this.props
|
||||
// Clear out any stale message in the alert of previous page
|
||||
clearAlert()
|
||||
document.body.classList.add("is-guest")
|
||||
|
||||
web.GetDiscoveryDoc().then(({ DiscoveryDoc }) => {
|
||||
this.setState({
|
||||
discoveryDoc: DiscoveryDoc
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
componentDidMount() {
|
||||
const values = qs.parse(this.props.location.hash)
|
||||
if (values.error) {
|
||||
this.props.showAlert("danger", values.error_description)
|
||||
return
|
||||
}
|
||||
|
||||
if (values.id_token) {
|
||||
// Check nonce on the token to prevent replay attacks
|
||||
const tokenJSON = jwtDecode(values.id_token)
|
||||
if (storage.getItem(OPEN_ID_NONCE_KEY) !== tokenJSON.nonce) {
|
||||
this.props.showAlert("danger", "Invalid auth token")
|
||||
return
|
||||
}
|
||||
|
||||
web.LoginSTS({ token: values.id_token }).then(() => {
|
||||
storage.removeItem(OPEN_ID_NONCE_KEY)
|
||||
this.forceUpdate()
|
||||
return
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
componentWillUnmount() {
|
||||
document.body.classList.remove("is-guest")
|
||||
}
|
||||
|
||||
render() {
|
||||
const { clearAlert, alert } = this.props
|
||||
if (web.LoggedIn()) {
|
||||
return <Redirect to={"/"} />
|
||||
}
|
||||
let alertBox = <Alert {...alert} onDismiss={clearAlert} />
|
||||
// Make sure you don't show a fading out alert box on the initial web-page load.
|
||||
if (!alert.message) alertBox = ""
|
||||
return (
|
||||
<div className="login">
|
||||
{alertBox}
|
||||
<div className="l-wrap">
|
||||
<form onSubmit={this.handleSubmit}>
|
||||
<InputGroup
|
||||
value={this.state.clientID}
|
||||
onChange={this.clientIDChange}
|
||||
className="ig-dark"
|
||||
label="Client ID"
|
||||
id="clientID"
|
||||
name="clientID"
|
||||
type="text"
|
||||
spellCheck="false"
|
||||
required="required"
|
||||
/>
|
||||
<button className="lw-btn" type="submit">
|
||||
<i className="fas fa-sign-in-alt" />
|
||||
</button>
|
||||
</form>
|
||||
</div>
|
||||
<div className="l-footer">
|
||||
<a className="lf-logo" href="">
|
||||
<img src={logo} alt="" />
|
||||
</a>
|
||||
<div className="lf-server">{window.location.host}</div>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
const mapDispatchToProps = dispatch => {
|
||||
return {
|
||||
showAlert: (type, message) =>
|
||||
dispatch(actionsAlert.set({ type: type, message: message })),
|
||||
clearAlert: () => dispatch(actionsAlert.clear())
|
||||
}
|
||||
}
|
||||
|
||||
export default connect(
|
||||
state => state,
|
||||
mapDispatchToProps
|
||||
)(OpenIDLogin)
|
||||
57
browser/app/js/browser/OpenIDLoginButton.js
Normal file
57
browser/app/js/browser/OpenIDLoginButton.js
Normal file
@@ -0,0 +1,57 @@
|
||||
/*
|
||||
* MinIO Cloud Storage (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import React from "react"
|
||||
import { getRandomString } from "../utils"
|
||||
import storage from "local-storage-fallback"
|
||||
import { buildOpenIDAuthURL, OPEN_ID_NONCE_KEY } from './utils'
|
||||
|
||||
export class OpenIDLoginButton extends React.Component {
|
||||
constructor(props) {
|
||||
super(props)
|
||||
this.handleClick = this.handleClick.bind(this)
|
||||
}
|
||||
|
||||
handleClick(event) {
|
||||
event.stopPropagation()
|
||||
const { authEp, authScopes, clientId } = this.props
|
||||
|
||||
let redirectURI = window.location.href.split("#")[0]
|
||||
if (redirectURI.endsWith('/')) {
|
||||
redirectURI += 'openid'
|
||||
} else {
|
||||
redirectURI += '/openid'
|
||||
}
|
||||
|
||||
// Store nonce in localstorage to check again after the redirect
|
||||
const nonce = getRandomString(16)
|
||||
storage.setItem(OPEN_ID_NONCE_KEY, nonce)
|
||||
|
||||
const authURL = buildOpenIDAuthURL(authEp, authScopes, redirectURI, clientId, nonce)
|
||||
window.location = authURL
|
||||
}
|
||||
|
||||
render() {
|
||||
const { children, className } = this.props
|
||||
return (
|
||||
<div onClick={this.handleClick} className={className}>
|
||||
{children}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
export default OpenIDLoginButton
|
||||
@@ -26,6 +26,13 @@ export class StorageInfo extends React.Component {
|
||||
}
|
||||
render() {
|
||||
const { used } = this.props.storageInfo
|
||||
|
||||
if (!used) {
|
||||
return <noscript />
|
||||
}
|
||||
|
||||
const totalUsed = used.reduce((v1, v2) => v1 + v2, 0)
|
||||
|
||||
return (
|
||||
<div className="feh-used">
|
||||
<div className="fehu-chart">
|
||||
@@ -34,7 +41,7 @@ export class StorageInfo extends React.Component {
|
||||
<ul>
|
||||
<li>
|
||||
<span>Used: </span>
|
||||
{humanize.filesize(used)}
|
||||
{humanize.filesize(totalUsed)}
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
@@ -54,4 +61,7 @@ const mapDispatchToProps = dispatch => {
|
||||
}
|
||||
}
|
||||
|
||||
export default connect(mapStateToProps, mapDispatchToProps)(StorageInfo)
|
||||
export default connect(
|
||||
mapStateToProps,
|
||||
mapDispatchToProps
|
||||
)(StorageInfo)
|
||||
|
||||
@@ -21,7 +21,6 @@ import { AboutModal } from "../AboutModal"
|
||||
describe("AboutModal", () => {
|
||||
const serverInfo = {
|
||||
version: "test",
|
||||
memory: "test",
|
||||
platform: "test",
|
||||
runtime: "test"
|
||||
}
|
||||
|
||||
@@ -21,7 +21,6 @@ import { BrowserDropdown } from "../BrowserDropdown"
|
||||
describe("BrowserDropdown", () => {
|
||||
const serverInfo = {
|
||||
version: "test",
|
||||
memory: "test",
|
||||
platform: "test",
|
||||
runtime: "test"
|
||||
}
|
||||
|
||||
@@ -54,32 +54,19 @@ jest.mock("../../utils", () => ({
|
||||
describe("ChangePasswordModal", () => {
|
||||
const serverInfo = {
|
||||
version: "test",
|
||||
memory: "test",
|
||||
platform: "test",
|
||||
runtime: "test",
|
||||
info: { isEnvCreds: false },
|
||||
userInfo: { isIAMUser: false }
|
||||
info: {},
|
||||
userInfo: { isIAMUser: true }
|
||||
}
|
||||
|
||||
it("should render without crashing", () => {
|
||||
shallow(<ChangePasswordModal serverInfo={serverInfo} />)
|
||||
})
|
||||
|
||||
it("should not allow changing password when isWorm is true", () => {
|
||||
const newServerInfo = { ...serverInfo, info: { isWorm: true } }
|
||||
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
|
||||
expect(
|
||||
wrapper
|
||||
.find("ModalBody")
|
||||
.childAt(0)
|
||||
.text()
|
||||
).toBe("Credentials of this user cannot be updated through MinIO Browser.")
|
||||
})
|
||||
|
||||
it("should not allow changing password when isEnvCreds is true and not IAM user", () => {
|
||||
it("should not allow changing password when not IAM user", () => {
|
||||
const newServerInfo = {
|
||||
...serverInfo,
|
||||
info: { isEnvCreds: true },
|
||||
userInfo: { isIAMUser: false }
|
||||
}
|
||||
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
|
||||
@@ -91,21 +78,22 @@ describe("ChangePasswordModal", () => {
|
||||
).toBe("Credentials of this user cannot be updated through MinIO Browser.")
|
||||
})
|
||||
|
||||
it("should generate accessKey and secretKey when Generate buttons is clicked", () => {
|
||||
const wrapper = shallow(<ChangePasswordModal serverInfo={serverInfo} />)
|
||||
wrapper.find("#generate-keys").simulate("click")
|
||||
setImmediate(() => {
|
||||
expect(wrapper.state("newAccessKey")).toBe("raccesskey")
|
||||
expect(wrapper.state("newSecretKey")).toBe("rsecretkey")
|
||||
})
|
||||
it("should not allow changing password for STS user", () => {
|
||||
const newServerInfo = {
|
||||
...serverInfo,
|
||||
userInfo: { isTempUser: true }
|
||||
}
|
||||
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
|
||||
expect(
|
||||
wrapper
|
||||
.find("ModalBody")
|
||||
.childAt(0)
|
||||
.text()
|
||||
).toBe("Credentials of this user cannot be updated through MinIO Browser.")
|
||||
})
|
||||
|
||||
it("should not generate accessKey for IAM User", () => {
|
||||
const newServerInfo = {
|
||||
...serverInfo,
|
||||
userInfo: { isIAMUser: true }
|
||||
}
|
||||
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
|
||||
const wrapper = shallow(<ChangePasswordModal serverInfo={serverInfo} />)
|
||||
wrapper.find("#generate-keys").simulate("click")
|
||||
setImmediate(() => {
|
||||
expect(wrapper.state("newAccessKey")).toBe("minio")
|
||||
@@ -114,59 +102,24 @@ describe("ChangePasswordModal", () => {
|
||||
})
|
||||
|
||||
it("should not show new accessKey field for IAM User", () => {
|
||||
const newServerInfo = {
|
||||
...serverInfo,
|
||||
userInfo: { isIAMUser: true }
|
||||
}
|
||||
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
|
||||
const wrapper = shallow(<ChangePasswordModal serverInfo={serverInfo} />)
|
||||
expect(wrapper.find("#newAccesskey").exists()).toBeFalsy()
|
||||
})
|
||||
|
||||
it("should disble Update button for invalid accessKey or secretKey", () => {
|
||||
it("should disable Update button for secretKey", () => {
|
||||
const showAlert = jest.fn()
|
||||
const wrapper = shallow(
|
||||
<ChangePasswordModal serverInfo={serverInfo} showAlert={showAlert} />
|
||||
)
|
||||
wrapper
|
||||
.find("#currentAccessKey")
|
||||
.simulate("change", { target: { value: "minio" } })
|
||||
wrapper
|
||||
.find("#currentSecretKey")
|
||||
.simulate("change", { target: { value: "minio123" } })
|
||||
wrapper.find("#newAccessKey").simulate("change", { target: { value: "t" } })
|
||||
wrapper
|
||||
.find("#newSecretKey")
|
||||
.simulate("change", { target: { value: "t1" } })
|
||||
expect(wrapper.find("#update-keys").prop("disabled")).toBeTruthy()
|
||||
})
|
||||
|
||||
it("should update accessKey and secretKey when Update button is clicked", () => {
|
||||
const showAlert = jest.fn()
|
||||
const wrapper = shallow(
|
||||
<ChangePasswordModal serverInfo={serverInfo} showAlert={showAlert} />
|
||||
)
|
||||
wrapper
|
||||
.find("#currentAccessKey")
|
||||
.simulate("change", { target: { value: "minio" } })
|
||||
wrapper
|
||||
.find("#currentSecretKey")
|
||||
.simulate("change", { target: { value: "minio123" } })
|
||||
wrapper
|
||||
.find("#newAccessKey")
|
||||
.simulate("change", { target: { value: "test" } })
|
||||
wrapper
|
||||
.find("#newSecretKey")
|
||||
.simulate("change", { target: { value: "test1234" } })
|
||||
expect(wrapper.find("#update-keys").prop("disabled")).toBeFalsy()
|
||||
wrapper.find("#update-keys").simulate("click")
|
||||
setImmediate(() => {
|
||||
expect(showAlert).toHaveBeenCalledWith({
|
||||
type: "success",
|
||||
message: "Credentials updated successfully."
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
it("should call hideChangePassword when Cancel button is clicked", () => {
|
||||
const hideChangePassword = jest.fn()
|
||||
const wrapper = shallow(
|
||||
|
||||
@@ -19,21 +19,24 @@ import { shallow, mount } from "enzyme"
|
||||
import { Login } from "../Login"
|
||||
import web from "../../web"
|
||||
|
||||
jest.mock('../../web', () => ({
|
||||
jest.mock("../../web", () => ({
|
||||
Login: jest.fn(() => {
|
||||
return Promise.resolve({ token: "test", uiVersion: "2018-02-01T01:17:47Z" })
|
||||
}),
|
||||
LoggedIn: jest.fn()
|
||||
LoggedIn: jest.fn(),
|
||||
GetDiscoveryDoc: jest.fn(() => {
|
||||
return Promise.resolve({ DiscoveryDoc: {"authorization_endpoint": "test"} })
|
||||
})
|
||||
}))
|
||||
|
||||
describe("Login", () => {
|
||||
const dispatchMock = jest.fn()
|
||||
const showAlertMock = jest.fn()
|
||||
const clearAlertMock = jest.fn()
|
||||
|
||||
|
||||
it("should render without crashing", () => {
|
||||
shallow(<Login
|
||||
dispatch={dispatchMock}
|
||||
shallow(<Login
|
||||
dispatch={dispatchMock}
|
||||
alert={{ show: false, type: "danger"}}
|
||||
showAlert={showAlertMock}
|
||||
clearAlert={clearAlertMock}
|
||||
@@ -42,8 +45,8 @@ describe("Login", () => {
|
||||
|
||||
it("should initially have the is-guest class", () => {
|
||||
const wrapper = shallow(
|
||||
<Login
|
||||
dispatch={dispatchMock}
|
||||
<Login
|
||||
dispatch={dispatchMock}
|
||||
alert={{ show: false, type: "danger"}}
|
||||
showAlert={showAlertMock}
|
||||
clearAlert={clearAlertMock}
|
||||
@@ -55,8 +58,8 @@ describe("Login", () => {
|
||||
|
||||
it("should throw an alert if the keys are empty in login form", () => {
|
||||
const wrapper = mount(
|
||||
<Login
|
||||
dispatch={dispatchMock}
|
||||
<Login
|
||||
dispatch={dispatchMock}
|
||||
alert={{ show: false, type: "danger"}}
|
||||
showAlert={showAlertMock}
|
||||
clearAlert={clearAlertMock}
|
||||
@@ -85,8 +88,8 @@ describe("Login", () => {
|
||||
|
||||
it("should call web.Login with correct arguments if both keys are entered", () => {
|
||||
const wrapper = mount(
|
||||
<Login
|
||||
dispatch={dispatchMock}
|
||||
<Login
|
||||
dispatch={dispatchMock}
|
||||
alert={{ show: false, type: "danger"}}
|
||||
showAlert={showAlertMock}
|
||||
clearAlert={clearAlertMock}
|
||||
@@ -98,7 +101,7 @@ describe("Login", () => {
|
||||
})
|
||||
wrapper.find("form").simulate("submit")
|
||||
expect(web.Login).toHaveBeenCalledWith({
|
||||
"username": "accessKey",
|
||||
"username": "accessKey",
|
||||
"password": "secretKey"
|
||||
})
|
||||
})
|
||||
|
||||
@@ -21,10 +21,7 @@ import { StorageInfo } from "../StorageInfo"
|
||||
describe("StorageInfo", () => {
|
||||
it("should render without crashing", () => {
|
||||
shallow(
|
||||
<StorageInfo
|
||||
storageInfo={{ used: 60 }}
|
||||
fetchStorageInfo={jest.fn()}
|
||||
/>
|
||||
<StorageInfo storageInfo={{ used: [60] }} fetchStorageInfo={jest.fn()} />
|
||||
)
|
||||
})
|
||||
|
||||
@@ -32,10 +29,21 @@ describe("StorageInfo", () => {
|
||||
const fetchStorageInfo = jest.fn()
|
||||
shallow(
|
||||
<StorageInfo
|
||||
storageInfo={{ used: 60 }}
|
||||
storageInfo={{ used: [60] }}
|
||||
fetchStorageInfo={fetchStorageInfo}
|
||||
/>
|
||||
)
|
||||
expect(fetchStorageInfo).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it("should not render anything if used is null", () => {
|
||||
const fetchStorageInfo = jest.fn()
|
||||
const wrapper = shallow(
|
||||
<StorageInfo
|
||||
storageInfo={{ used: null }}
|
||||
fetchStorageInfo={fetchStorageInfo}
|
||||
/>
|
||||
)
|
||||
expect(wrapper.text()).toBe("")
|
||||
})
|
||||
})
|
||||
|
||||
@@ -20,12 +20,11 @@ import * as actionsCommon from "../actions"
|
||||
|
||||
jest.mock("../../web", () => ({
|
||||
StorageInfo: jest.fn(() => {
|
||||
return Promise.resolve({ storageInfo: { Used: 60 } })
|
||||
return Promise.resolve({ storageInfo: { Used: [60] } })
|
||||
}),
|
||||
ServerInfo: jest.fn(() => {
|
||||
return Promise.resolve({
|
||||
MinioVersion: "test",
|
||||
MinioMemory: "test",
|
||||
MinioPlatform: "test",
|
||||
MinioRuntime: "test",
|
||||
MinioGlobalInfo: "test"
|
||||
@@ -40,7 +39,7 @@ describe("Common actions", () => {
|
||||
it("creates common/SET_STORAGE_INFO after fetching the storage details ", () => {
|
||||
const store = mockStore()
|
||||
const expectedActions = [
|
||||
{ type: "common/SET_STORAGE_INFO", storageInfo: { used: 60 } }
|
||||
{ type: "common/SET_STORAGE_INFO", storageInfo: { used: [60] } }
|
||||
]
|
||||
return store.dispatch(actionsCommon.fetchStorageInfo()).then(() => {
|
||||
const actions = store.getActions()
|
||||
@@ -55,7 +54,6 @@ describe("Common actions", () => {
|
||||
type: "common/SET_SERVER_INFO",
|
||||
serverInfo: {
|
||||
version: "test",
|
||||
memory: "test",
|
||||
platform: "test",
|
||||
runtime: "test",
|
||||
info: "test"
|
||||
|
||||
@@ -22,8 +22,9 @@ describe("common reducer", () => {
|
||||
expect(reducer(undefined, {})).toEqual({
|
||||
sidebarOpen: false,
|
||||
storageInfo: {
|
||||
total: 0,
|
||||
free: 0
|
||||
total: [0],
|
||||
free: [0],
|
||||
used: [0]
|
||||
},
|
||||
serverInfo: {}
|
||||
})
|
||||
@@ -61,11 +62,11 @@ describe("common reducer", () => {
|
||||
{},
|
||||
{
|
||||
type: actionsCommon.SET_STORAGE_INFO,
|
||||
storageInfo: { total: 100, free: 40 }
|
||||
storageInfo: { total: [100], free: [40] }
|
||||
}
|
||||
)
|
||||
).toEqual({
|
||||
storageInfo: { total: 100, free: 40 }
|
||||
storageInfo: { total: [100], free: [40] }
|
||||
})
|
||||
})
|
||||
|
||||
@@ -75,7 +76,6 @@ describe("common reducer", () => {
|
||||
type: actionsCommon.SET_SERVER_INFO,
|
||||
serverInfo: {
|
||||
version: "test",
|
||||
memory: "test",
|
||||
platform: "test",
|
||||
runtime: "test",
|
||||
info: "test"
|
||||
@@ -83,7 +83,6 @@ describe("common reducer", () => {
|
||||
}).serverInfo
|
||||
).toEqual({
|
||||
version: "test",
|
||||
memory: "test",
|
||||
platform: "test",
|
||||
runtime: "test",
|
||||
info: "test"
|
||||
|
||||
@@ -51,7 +51,6 @@ export const fetchServerInfo = () => {
|
||||
return web.ServerInfo().then(res => {
|
||||
const serverInfo = {
|
||||
version: res.MinioVersion,
|
||||
memory: res.MinioMemory,
|
||||
platform: res.MinioPlatform,
|
||||
runtime: res.MinioRuntime,
|
||||
info: res.MinioGlobalInfo,
|
||||
|
||||
@@ -19,7 +19,7 @@ import * as actionsCommon from "./actions"
|
||||
export default (
|
||||
state = {
|
||||
sidebarOpen: false,
|
||||
storageInfo: { total: 0, free: 0 },
|
||||
storageInfo: { total: [0], free: [0], used: [0] },
|
||||
serverInfo: {}
|
||||
},
|
||||
action
|
||||
|
||||
24
browser/app/js/browser/selectors.js
Normal file
24
browser/app/js/browser/selectors.js
Normal file
@@ -0,0 +1,24 @@
|
||||
/*
|
||||
* MinIO Cloud Storage (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import { createSelector } from "reselect"
|
||||
|
||||
export const getServerInfo = state => state.browser.serverInfo
|
||||
|
||||
export const hasServerPublicDomain = createSelector(
|
||||
getServerInfo,
|
||||
serverInfo => Boolean(serverInfo.info && serverInfo.info.domains && serverInfo.info.domains.length),
|
||||
)
|
||||
28
browser/app/js/browser/utils.js
Normal file
28
browser/app/js/browser/utils.js
Normal file
@@ -0,0 +1,28 @@
|
||||
/*
|
||||
* MinIO Cloud Storage (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
export const OPEN_ID_NONCE_KEY = 'openIDKey'
|
||||
|
||||
export const buildOpenIDAuthURL = (authEp, authScopes, redirectURI, clientID, nonce) => {
|
||||
const params = new URLSearchParams()
|
||||
params.set("response_type", "id_token")
|
||||
params.set("scope", authScopes.join(" "))
|
||||
params.set("client_id", clientID)
|
||||
params.set("redirect_uri", redirectURI)
|
||||
params.set("nonce", nonce)
|
||||
|
||||
return `${authEp}?${params.toString()}`
|
||||
}
|
||||
@@ -17,14 +17,29 @@
|
||||
import React from "react"
|
||||
import { connect } from "react-redux"
|
||||
import { Scrollbars } from "react-custom-scrollbars"
|
||||
import InfiniteScroll from "react-infinite-scroller"
|
||||
import * as actionsBuckets from "./actions"
|
||||
import { getVisibleBuckets } from "./selectors"
|
||||
import { getFilteredBuckets } from "./selectors"
|
||||
import BucketContainer from "./BucketContainer"
|
||||
import web from "../web"
|
||||
import history from "../history"
|
||||
import { pathSlice } from "../utils"
|
||||
|
||||
export class BucketList extends React.Component {
|
||||
constructor(props) {
|
||||
super(props)
|
||||
this.state = {
|
||||
page: 1
|
||||
}
|
||||
this.loadNextPage = this.loadNextPage.bind(this)
|
||||
}
|
||||
componentDidUpdate(prevProps) {
|
||||
if (this.props.filter !== prevProps.filter) {
|
||||
this.setState({
|
||||
page: 1
|
||||
})
|
||||
}
|
||||
}
|
||||
componentWillMount() {
|
||||
const { fetchBuckets, setBucketList, selectBucket } = this.props
|
||||
if (web.LoggedIn()) {
|
||||
@@ -39,18 +54,33 @@ export class BucketList extends React.Component {
|
||||
}
|
||||
}
|
||||
}
|
||||
loadNextPage() {
|
||||
this.setState({
|
||||
page: this.state.page + 1
|
||||
})
|
||||
}
|
||||
render() {
|
||||
const { visibleBuckets } = this.props
|
||||
const { filteredBuckets } = this.props
|
||||
const visibleBuckets = filteredBuckets.slice(0, this.state.page * 100)
|
||||
return (
|
||||
<div className="fesl-inner">
|
||||
<Scrollbars
|
||||
renderTrackVertical={props => <div className="scrollbar-vertical" />}
|
||||
>
|
||||
<ul>
|
||||
{visibleBuckets.map(bucket => (
|
||||
<BucketContainer key={bucket} bucket={bucket} />
|
||||
))}
|
||||
</ul>
|
||||
<InfiniteScroll
|
||||
pageStart={0}
|
||||
loadMore={this.loadNextPage}
|
||||
hasMore={filteredBuckets.length > visibleBuckets.length}
|
||||
useWindow={false}
|
||||
element="div"
|
||||
initialLoad={false}
|
||||
>
|
||||
<ul>
|
||||
{visibleBuckets.map(bucket => (
|
||||
<BucketContainer key={bucket} bucket={bucket} />
|
||||
))}
|
||||
</ul>
|
||||
</InfiniteScroll>
|
||||
</Scrollbars>
|
||||
</div>
|
||||
)
|
||||
@@ -59,7 +89,8 @@ export class BucketList extends React.Component {
|
||||
|
||||
const mapStateToProps = state => {
|
||||
return {
|
||||
visibleBuckets: getVisibleBuckets(state)
|
||||
filteredBuckets: getFilteredBuckets(state),
|
||||
filter: state.buckets.filter
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -29,13 +29,13 @@ jest.mock("../../web", () => ({
|
||||
describe("BucketList", () => {
|
||||
it("should render without crashing", () => {
|
||||
const fetchBuckets = jest.fn()
|
||||
shallow(<BucketList visibleBuckets={[]} fetchBuckets={fetchBuckets} />)
|
||||
shallow(<BucketList filteredBuckets={[]} fetchBuckets={fetchBuckets} />)
|
||||
})
|
||||
|
||||
it("should call fetchBuckets before component is mounted", () => {
|
||||
const fetchBuckets = jest.fn()
|
||||
const wrapper = shallow(
|
||||
<BucketList visibleBuckets={[]} fetchBuckets={fetchBuckets} />
|
||||
<BucketList filteredBuckets={[]} fetchBuckets={fetchBuckets} />
|
||||
)
|
||||
expect(fetchBuckets).toHaveBeenCalled()
|
||||
})
|
||||
@@ -46,7 +46,7 @@ describe("BucketList", () => {
|
||||
history.push("/bk1/pre1")
|
||||
const wrapper = shallow(
|
||||
<BucketList
|
||||
visibleBuckets={[]}
|
||||
filteredBuckets={[]}
|
||||
setBucketList={setBucketList}
|
||||
selectBucket={selectBucket}
|
||||
/>
|
||||
|
||||
@@ -14,9 +14,9 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import { getVisibleBuckets, getCurrentBucket } from "../selectors"
|
||||
import { getFilteredBuckets, getCurrentBucket } from "../selectors"
|
||||
|
||||
describe("getVisibleBuckets", () => {
|
||||
describe("getFilteredBuckets", () => {
|
||||
let state
|
||||
beforeEach(() => {
|
||||
state = {
|
||||
@@ -28,11 +28,11 @@ describe("getVisibleBuckets", () => {
|
||||
|
||||
it("should return all buckets if no filter specified", () => {
|
||||
state.buckets.filter = ""
|
||||
expect(getVisibleBuckets(state)).toEqual(["test1", "test11", "test2"])
|
||||
expect(getFilteredBuckets(state)).toEqual(["test1", "test11", "test2"])
|
||||
})
|
||||
|
||||
it("should return all matching buckets if filter is specified", () => {
|
||||
state.buckets.filter = "test1"
|
||||
expect(getVisibleBuckets(state)).toEqual(["test1", "test11"])
|
||||
expect(getFilteredBuckets(state)).toEqual(["test1", "test11"])
|
||||
})
|
||||
})
|
||||
|
||||
@@ -19,7 +19,7 @@ import { createSelector } from "reselect"
|
||||
const bucketsSelector = state => state.buckets.list
|
||||
const bucketsFilterSelector = state => state.buckets.filter
|
||||
|
||||
export const getVisibleBuckets = createSelector(
|
||||
export const getFilteredBuckets = createSelector(
|
||||
bucketsSelector,
|
||||
bucketsFilterSelector,
|
||||
(buckets, filter) => buckets.filter(bucket => bucket.indexOf(filter) > -1)
|
||||
|
||||
@@ -14,30 +14,67 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import mimedb from 'mime-types'
|
||||
import mimedb from "mime-types"
|
||||
|
||||
const isFolder = (name, contentType) => {
|
||||
if (name.endsWith('/')) return true
|
||||
if (name.endsWith("/")) return true
|
||||
return false
|
||||
}
|
||||
|
||||
const isPdf = (name, contentType) => {
|
||||
if (contentType === 'application/pdf') return true
|
||||
if (contentType === "application/pdf") return true
|
||||
return false
|
||||
}
|
||||
const isImage = (name, contentType) => {
|
||||
if (
|
||||
contentType === "image/jpeg" ||
|
||||
contentType === "image/gif" ||
|
||||
contentType === "image/x-icon" ||
|
||||
contentType === "image/png" ||
|
||||
contentType === "image/svg+xml" ||
|
||||
contentType === "image/tiff" ||
|
||||
contentType === "image/webp"
|
||||
)
|
||||
return true
|
||||
return false
|
||||
}
|
||||
|
||||
const isZip = (name, contentType) => {
|
||||
if (!contentType || !contentType.includes('/')) return false
|
||||
if (contentType.split('/')[1].includes('zip')) return true
|
||||
if (!contentType || !contentType.includes("/")) return false
|
||||
if (contentType.split("/")[1].includes("zip")) return true
|
||||
return false
|
||||
}
|
||||
|
||||
const isCode = (name, contentType) => {
|
||||
const codeExt = ['c', 'cpp', 'go', 'py', 'java', 'rb', 'js', 'pl', 'fs',
|
||||
'php', 'css', 'less', 'scss', 'coffee', 'net', 'html',
|
||||
'rs', 'exs', 'scala', 'hs', 'clj', 'el', 'scm', 'lisp',
|
||||
'asp', 'aspx']
|
||||
const ext = name.split('.').reverse()[0]
|
||||
const codeExt = [
|
||||
"c",
|
||||
"cpp",
|
||||
"go",
|
||||
"py",
|
||||
"java",
|
||||
"rb",
|
||||
"js",
|
||||
"pl",
|
||||
"fs",
|
||||
"php",
|
||||
"css",
|
||||
"less",
|
||||
"scss",
|
||||
"coffee",
|
||||
"net",
|
||||
"html",
|
||||
"rs",
|
||||
"exs",
|
||||
"scala",
|
||||
"hs",
|
||||
"clj",
|
||||
"el",
|
||||
"scm",
|
||||
"lisp",
|
||||
"asp",
|
||||
"aspx",
|
||||
]
|
||||
const ext = name.split(".").reverse()[0]
|
||||
for (var i in codeExt) {
|
||||
if (ext === codeExt[i]) return true
|
||||
}
|
||||
@@ -45,9 +82,9 @@ const isCode = (name, contentType) => {
|
||||
}
|
||||
|
||||
const isExcel = (name, contentType) => {
|
||||
if (!contentType || !contentType.includes('/')) return false
|
||||
const types = ['excel', 'spreadsheet']
|
||||
const subType = contentType.split('/')[1]
|
||||
if (!contentType || !contentType.includes("/")) return false
|
||||
const types = ["excel", "spreadsheet"]
|
||||
const subType = contentType.split("/")[1]
|
||||
for (var i in types) {
|
||||
if (subType.includes(types[i])) return true
|
||||
}
|
||||
@@ -55,9 +92,9 @@ const isExcel = (name, contentType) => {
|
||||
}
|
||||
|
||||
const isDoc = (name, contentType) => {
|
||||
if (!contentType || !contentType.includes('/')) return false
|
||||
const types = ['word', '.document']
|
||||
const subType = contentType.split('/')[1]
|
||||
if (!contentType || !contentType.includes("/")) return false
|
||||
const types = ["word", ".document"]
|
||||
const subType = contentType.split("/")[1]
|
||||
for (var i in types) {
|
||||
if (subType.includes(types[i])) return true
|
||||
}
|
||||
@@ -65,9 +102,9 @@ const isDoc = (name, contentType) => {
|
||||
}
|
||||
|
||||
const isPresentation = (name, contentType) => {
|
||||
if (!contentType || !contentType.includes('/')) return false
|
||||
var types = ['powerpoint', 'presentation']
|
||||
const subType = contentType.split('/')[1]
|
||||
if (!contentType || !contentType.includes("/")) return false
|
||||
var types = ["powerpoint", "presentation"]
|
||||
const subType = contentType.split("/")[1]
|
||||
for (var i in types) {
|
||||
if (subType.includes(types[i])) return true
|
||||
}
|
||||
@@ -76,31 +113,32 @@ const isPresentation = (name, contentType) => {
|
||||
|
||||
const typeToIcon = (type) => {
|
||||
return (name, contentType) => {
|
||||
if (!contentType || !contentType.includes('/')) return false
|
||||
if (contentType.split('/')[0] === type) return true
|
||||
if (!contentType || !contentType.includes("/")) return false
|
||||
if (contentType.split("/")[0] === type) return true
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
export const getDataType = (name, contentType) => {
|
||||
if (contentType === "") {
|
||||
contentType = mimedb.lookup(name) || 'application/octet-stream'
|
||||
contentType = mimedb.lookup(name) || "application/octet-stream"
|
||||
}
|
||||
const check = [
|
||||
['folder', isFolder],
|
||||
['code', isCode],
|
||||
['audio', typeToIcon('audio')],
|
||||
['image', typeToIcon('image')],
|
||||
['video', typeToIcon('video')],
|
||||
['text', typeToIcon('text')],
|
||||
['pdf', isPdf],
|
||||
['zip', isZip],
|
||||
['excel', isExcel],
|
||||
['doc', isDoc],
|
||||
['presentation', isPresentation]
|
||||
["folder", isFolder],
|
||||
["code", isCode],
|
||||
["audio", typeToIcon("audio")],
|
||||
["image", typeToIcon("image")],
|
||||
["video", typeToIcon("video")],
|
||||
["text", typeToIcon("text")],
|
||||
["pdf", isPdf],
|
||||
["image", isImage],
|
||||
["zip", isZip],
|
||||
["excel", isExcel],
|
||||
["doc", isDoc],
|
||||
["presentation", isPresentation],
|
||||
]
|
||||
for (var i in check) {
|
||||
if (check[i][1](name, contentType)) return check[i][0]
|
||||
}
|
||||
return 'other'
|
||||
return "other"
|
||||
}
|
||||
|
||||
@@ -19,18 +19,22 @@ import { connect } from "react-redux"
|
||||
import { Dropdown } from "react-bootstrap"
|
||||
import ShareObjectModal from "./ShareObjectModal"
|
||||
import DeleteObjectConfirmModal from "./DeleteObjectConfirmModal"
|
||||
import PreviewObjectModal from "./PreviewObjectModal"
|
||||
|
||||
import * as objectsActions from "./actions"
|
||||
import { getDataType } from "../mime.js"
|
||||
import {
|
||||
SHARE_OBJECT_EXPIRY_DAYS,
|
||||
SHARE_OBJECT_EXPIRY_HOURS,
|
||||
SHARE_OBJECT_EXPIRY_MINUTES
|
||||
SHARE_OBJECT_EXPIRY_MINUTES,
|
||||
} from "../constants"
|
||||
|
||||
export class ObjectActions extends React.Component {
|
||||
constructor(props) {
|
||||
super(props)
|
||||
this.state = {
|
||||
showDeleteConfirmation: false
|
||||
showDeleteConfirmation: false,
|
||||
showPreview: false,
|
||||
}
|
||||
}
|
||||
shareObject(e) {
|
||||
@@ -43,6 +47,11 @@ export class ObjectActions extends React.Component {
|
||||
SHARE_OBJECT_EXPIRY_MINUTES
|
||||
)
|
||||
}
|
||||
handleDownload(e) {
|
||||
e.preventDefault()
|
||||
const { object, downloadObject } = this.props
|
||||
downloadObject(object.name)
|
||||
}
|
||||
deleteObject() {
|
||||
const { object, deleteObject } = this.props
|
||||
deleteObject(object.name)
|
||||
@@ -53,7 +62,20 @@ export class ObjectActions extends React.Component {
|
||||
}
|
||||
hideDeleteConfirmModal() {
|
||||
this.setState({
|
||||
showDeleteConfirmation: false
|
||||
showDeleteConfirmation: false,
|
||||
})
|
||||
}
|
||||
getObjectURL(objectname, callback) {
|
||||
const { getObjectURL } = this.props
|
||||
getObjectURL(objectname, callback)
|
||||
}
|
||||
showPreviewModal(e) {
|
||||
e.preventDefault()
|
||||
this.setState({ showPreview: true })
|
||||
}
|
||||
hidePreviewModal() {
|
||||
this.setState({
|
||||
showPreview: false,
|
||||
})
|
||||
}
|
||||
render() {
|
||||
@@ -65,26 +87,54 @@ export class ObjectActions extends React.Component {
|
||||
<a
|
||||
href=""
|
||||
className="fiad-action"
|
||||
title="Share"
|
||||
onClick={this.shareObject.bind(this)}
|
||||
>
|
||||
<i className="fas fa-share-alt" />
|
||||
</a>
|
||||
{getDataType(object.name, object.contentType) == "image" && (
|
||||
<a
|
||||
href=""
|
||||
className="fiad-action"
|
||||
title="Preview"
|
||||
onClick={this.showPreviewModal.bind(this)}
|
||||
>
|
||||
<i className="far fa-file-image" />
|
||||
</a>
|
||||
)}
|
||||
<a
|
||||
href=""
|
||||
className="fiad-action"
|
||||
title="Download"
|
||||
onClick={this.handleDownload.bind(this)}
|
||||
>
|
||||
<i className="fas fa-cloud-download-alt" />
|
||||
</a>
|
||||
<a
|
||||
href=""
|
||||
className="fiad-action"
|
||||
title="Delete"
|
||||
onClick={this.showDeleteConfirmModal.bind(this)}
|
||||
>
|
||||
<i className="fas fa-trash-alt" />
|
||||
</a>
|
||||
</Dropdown.Menu>
|
||||
{(showShareObjectModal && shareObjectName === object.name) &&
|
||||
<ShareObjectModal object={object} />}
|
||||
{showShareObjectModal && shareObjectName === object.name && (
|
||||
<ShareObjectModal object={object} />
|
||||
)}
|
||||
{this.state.showDeleteConfirmation && (
|
||||
<DeleteObjectConfirmModal
|
||||
deleteObject={this.deleteObject.bind(this)}
|
||||
hideDeleteConfirmModal={this.hideDeleteConfirmModal.bind(this)}
|
||||
/>
|
||||
)}
|
||||
{this.state.showPreview && (
|
||||
<PreviewObjectModal
|
||||
object={object}
|
||||
hidePreviewModal={this.hidePreviewModal.bind(this)}
|
||||
getObjectURL={this.getObjectURL.bind(this)}
|
||||
/>
|
||||
)}
|
||||
</Dropdown>
|
||||
)
|
||||
}
|
||||
@@ -94,15 +144,18 @@ const mapStateToProps = (state, ownProps) => {
|
||||
return {
|
||||
object: ownProps.object,
|
||||
showShareObjectModal: state.objects.shareObject.show,
|
||||
shareObjectName: state.objects.shareObject.object
|
||||
shareObjectName: state.objects.shareObject.object,
|
||||
}
|
||||
}
|
||||
|
||||
const mapDispatchToProps = dispatch => {
|
||||
const mapDispatchToProps = (dispatch) => {
|
||||
return {
|
||||
downloadObject: object => dispatch(objectsActions.downloadObject(object)),
|
||||
shareObject: (object, days, hours, minutes) =>
|
||||
dispatch(objectsActions.shareObject(object, days, hours, minutes)),
|
||||
deleteObject: object => dispatch(objectsActions.deleteObject(object))
|
||||
deleteObject: (object) => dispatch(objectsActions.deleteObject(object)),
|
||||
getObjectURL: (object, callback) =>
|
||||
dispatch(objectsActions.getObjectURL(object, callback)),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
95
browser/app/js/objects/PrefixActions.js
Normal file
95
browser/app/js/objects/PrefixActions.js
Normal file
@@ -0,0 +1,95 @@
|
||||
/*
|
||||
* MinIO Cloud Storage (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import React from "react"
|
||||
import { connect } from "react-redux"
|
||||
import { Dropdown } from "react-bootstrap"
|
||||
import DeleteObjectConfirmModal from "./DeleteObjectConfirmModal"
|
||||
import * as actions from "./actions"
|
||||
|
||||
export class PrefixActions extends React.Component {
|
||||
constructor(props) {
|
||||
super(props)
|
||||
this.state = {
|
||||
showDeleteConfirmation: false,
|
||||
}
|
||||
}
|
||||
handleDownload(e) {
|
||||
e.preventDefault()
|
||||
const { object, downloadPrefix } = this.props
|
||||
downloadPrefix(object.name)
|
||||
}
|
||||
deleteObject() {
|
||||
const { object, deleteObject } = this.props
|
||||
deleteObject(object.name)
|
||||
}
|
||||
showDeleteConfirmModal(e) {
|
||||
e.preventDefault()
|
||||
this.setState({ showDeleteConfirmation: true })
|
||||
}
|
||||
hideDeleteConfirmModal() {
|
||||
this.setState({
|
||||
showDeleteConfirmation: false,
|
||||
})
|
||||
}
|
||||
render() {
|
||||
const { object, showShareObjectModal, shareObjectName } = this.props
|
||||
return (
|
||||
<Dropdown id={`obj-actions-${object.name}`}>
|
||||
<Dropdown.Toggle noCaret className="fia-toggle" />
|
||||
<Dropdown.Menu>
|
||||
<a
|
||||
href=""
|
||||
className="fiad-action"
|
||||
title="Download as zip"
|
||||
onClick={this.handleDownload.bind(this)}
|
||||
>
|
||||
<i className="fas fa-cloud-download-alt" />
|
||||
</a>
|
||||
<a
|
||||
href=""
|
||||
className="fiad-action"
|
||||
title="Delete"
|
||||
onClick={this.showDeleteConfirmModal.bind(this)}
|
||||
>
|
||||
<i className="fas fa-trash-alt" />
|
||||
</a>
|
||||
</Dropdown.Menu>
|
||||
{this.state.showDeleteConfirmation && (
|
||||
<DeleteObjectConfirmModal
|
||||
deleteObject={this.deleteObject.bind(this)}
|
||||
hideDeleteConfirmModal={this.hideDeleteConfirmModal.bind(this)}
|
||||
/>
|
||||
)}
|
||||
</Dropdown>
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
const mapStateToProps = (state, ownProps) => {
|
||||
return {
|
||||
object: ownProps.object,
|
||||
}
|
||||
}
|
||||
|
||||
const mapDispatchToProps = (dispatch) => {
|
||||
return {
|
||||
downloadPrefix: object => dispatch(actions.downloadPrefix(object)),
|
||||
deleteObject: (object) => dispatch(actions.deleteObject(object)),
|
||||
}
|
||||
}
|
||||
|
||||
export default connect(mapStateToProps, mapDispatchToProps)(PrefixActions)
|
||||
@@ -17,22 +17,32 @@
|
||||
import React from "react"
|
||||
import { connect } from "react-redux"
|
||||
import ObjectItem from "./ObjectItem"
|
||||
import PrefixActions from "./PrefixActions"
|
||||
import * as actionsObjects from "./actions"
|
||||
import { getCheckedList } from "./selectors"
|
||||
|
||||
export const PrefixContainer = ({ object, currentPrefix, selectPrefix }) => {
|
||||
export const PrefixContainer = ({
|
||||
object,
|
||||
currentPrefix,
|
||||
checkedObjectsCount,
|
||||
selectPrefix
|
||||
}) => {
|
||||
const props = {
|
||||
name: object.name,
|
||||
contentType: object.contentType,
|
||||
onClick: () => selectPrefix(`${currentPrefix}${object.name}`)
|
||||
}
|
||||
|
||||
if (checkedObjectsCount == 0) {
|
||||
props.actionButtons = <PrefixActions object={object} />
|
||||
}
|
||||
return <ObjectItem {...props} />
|
||||
}
|
||||
|
||||
const mapStateToProps = (state, ownProps) => {
|
||||
return {
|
||||
object: ownProps.object,
|
||||
currentPrefix: state.objects.currentPrefix
|
||||
currentPrefix: state.objects.currentPrefix,
|
||||
checkedObjectsCount: getCheckedList(state).length
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
68
browser/app/js/objects/PreviewObjectModal.js
Normal file
68
browser/app/js/objects/PreviewObjectModal.js
Normal file
@@ -0,0 +1,68 @@
|
||||
/*
|
||||
* MinIO Cloud Storage (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import React from "react"
|
||||
import { Modal, ModalHeader, ModalBody } from "react-bootstrap"
|
||||
|
||||
class PreviewObjectModal extends React.Component {
|
||||
constructor(props) {
|
||||
super(props)
|
||||
this.state = {
|
||||
url: "",
|
||||
}
|
||||
}
|
||||
|
||||
componentDidMount() {
|
||||
this.props.getObjectURL(this.props.object.name, (url) => {
|
||||
this.setState({
|
||||
url: url,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
render() {
|
||||
const { hidePreviewModal } = this.props
|
||||
return (
|
||||
<Modal
|
||||
show={true}
|
||||
animation={false}
|
||||
onHide={hidePreviewModal}
|
||||
bsSize="large"
|
||||
>
|
||||
<ModalHeader>Preview</ModalHeader>
|
||||
<ModalBody>
|
||||
<div className="input-group">
|
||||
{this.state.url && (
|
||||
<img
|
||||
alt="Image broken"
|
||||
src={this.state.url}
|
||||
style={{ display: "block", width: "100%" }}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
</ModalBody>
|
||||
<div className="modal-footer">
|
||||
{
|
||||
<button className="btn btn-link" onClick={hidePreviewModal}>
|
||||
Cancel
|
||||
</button>
|
||||
}
|
||||
</div>
|
||||
</Modal>
|
||||
)
|
||||
}
|
||||
}
|
||||
export default PreviewObjectModal
|
||||
@@ -77,7 +77,8 @@ export class ShareObjectModal extends React.Component {
|
||||
hideShareObject()
|
||||
}
|
||||
render() {
|
||||
const { shareObjectDetails, shareObject, hideShareObject } = this.props
|
||||
const { shareObjectDetails, hideShareObject } = this.props
|
||||
const url = `${window.location.protocol}//${shareObjectDetails.url}`
|
||||
return (
|
||||
<Modal
|
||||
show={true}
|
||||
@@ -93,11 +94,12 @@ export class ShareObjectModal extends React.Component {
|
||||
type="text"
|
||||
ref={node => (this.copyTextInput = node)}
|
||||
readOnly="readOnly"
|
||||
value={window.location.protocol + "//" + shareObjectDetails.url}
|
||||
value={url}
|
||||
onClick={() => this.copyTextInput.select()}
|
||||
/>
|
||||
</div>
|
||||
<div
|
||||
{shareObjectDetails.showExpiryDate && (
|
||||
<div
|
||||
className="input-group"
|
||||
style={{ display: web.LoggedIn() ? "block" : "none" }}
|
||||
>
|
||||
@@ -174,10 +176,11 @@ export class ShareObjectModal extends React.Component {
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</ModalBody>
|
||||
<div className="modal-footer">
|
||||
<CopyToClipboard
|
||||
text={window.location.protocol + "//" + shareObjectDetails.url}
|
||||
text={url}
|
||||
onCopy={this.onUrlCopied.bind(this)}
|
||||
>
|
||||
<button className="btn btn-success">Copy Link</button>
|
||||
|
||||
@@ -66,6 +66,63 @@ describe("ObjectActions", () => {
|
||||
expect(deleteObject).toHaveBeenCalledWith("obj1")
|
||||
})
|
||||
|
||||
|
||||
it("should call downloadObject when single object is selected and download button is clicked", () => {
|
||||
const downloadObject = jest.fn()
|
||||
const wrapper = shallow(
|
||||
<ObjectActions
|
||||
object={{ name: "obj1" }}
|
||||
currentPrefix={"pre1/"}
|
||||
downloadObject={downloadObject} />
|
||||
)
|
||||
wrapper
|
||||
.find("a")
|
||||
.at(1)
|
||||
.simulate("click", { preventDefault: jest.fn() })
|
||||
expect(downloadObject).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
|
||||
it("should show PreviewObjectModal when preview action is clicked", () => {
|
||||
const wrapper = shallow(
|
||||
<ObjectActions
|
||||
object={{ name: "obj1", contentType: "image/jpeg"}}
|
||||
currentPrefix={"pre1/"} />
|
||||
)
|
||||
wrapper
|
||||
.find("a")
|
||||
.at(1)
|
||||
.simulate("click", { preventDefault: jest.fn() })
|
||||
expect(wrapper.state("showPreview")).toBeTruthy()
|
||||
expect(wrapper.find("PreviewObjectModal").length).toBe(1)
|
||||
})
|
||||
|
||||
it("should hide PreviewObjectModal when cancel button is clicked", () => {
|
||||
const wrapper = shallow(
|
||||
<ObjectActions
|
||||
object={{ name: "obj1" , contentType: "image/jpeg"}}
|
||||
currentPrefix={"pre1/"} />
|
||||
)
|
||||
wrapper
|
||||
.find("a")
|
||||
.at(1)
|
||||
.simulate("click", { preventDefault: jest.fn() })
|
||||
wrapper.find("PreviewObjectModal").prop("hidePreviewModal")()
|
||||
wrapper.update()
|
||||
expect(wrapper.state("showPreview")).toBeFalsy()
|
||||
expect(wrapper.find("PreviewObjectModal").length).toBe(0)
|
||||
})
|
||||
it("should not show PreviewObjectModal when preview action is clicked if object is not an image", () => {
|
||||
const wrapper = shallow(
|
||||
<ObjectActions
|
||||
object={{ name: "obj1"}}
|
||||
currentPrefix={"pre1/"} />
|
||||
)
|
||||
expect(wrapper
|
||||
.find("a")
|
||||
.length).toBe(3) // find only the other 2
|
||||
})
|
||||
|
||||
it("should call shareObject with object and expiry", () => {
|
||||
const shareObject = jest.fn()
|
||||
const wrapper = shallow(
|
||||
|
||||
84
browser/app/js/objects/__tests__/PrefixActions.test.js
Normal file
84
browser/app/js/objects/__tests__/PrefixActions.test.js
Normal file
@@ -0,0 +1,84 @@
|
||||
/*
|
||||
* MinIO Cloud Storage (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import React from "react"
|
||||
import { shallow } from "enzyme"
|
||||
import { PrefixActions } from "../PrefixActions"
|
||||
|
||||
describe("PrefixActions", () => {
|
||||
it("should render without crashing", () => {
|
||||
shallow(<PrefixActions object={{ name: "abc/" }} currentPrefix={"pre1/"} />)
|
||||
})
|
||||
|
||||
it("should show DeleteObjectConfirmModal when delete action is clicked", () => {
|
||||
const wrapper = shallow(
|
||||
<PrefixActions object={{ name: "abc/" }} currentPrefix={"pre1/"} />
|
||||
)
|
||||
wrapper
|
||||
.find("a")
|
||||
.last()
|
||||
.simulate("click", { preventDefault: jest.fn() })
|
||||
expect(wrapper.state("showDeleteConfirmation")).toBeTruthy()
|
||||
expect(wrapper.find("DeleteObjectConfirmModal").length).toBe(1)
|
||||
})
|
||||
|
||||
it("should hide DeleteObjectConfirmModal when Cancel button is clicked", () => {
|
||||
const wrapper = shallow(
|
||||
<PrefixActions object={{ name: "abc/" }} currentPrefix={"pre1/"} />
|
||||
)
|
||||
wrapper
|
||||
.find("a")
|
||||
.last()
|
||||
.simulate("click", { preventDefault: jest.fn() })
|
||||
wrapper.find("DeleteObjectConfirmModal").prop("hideDeleteConfirmModal")()
|
||||
wrapper.update()
|
||||
expect(wrapper.state("showDeleteConfirmation")).toBeFalsy()
|
||||
expect(wrapper.find("DeleteObjectConfirmModal").length).toBe(0)
|
||||
})
|
||||
|
||||
it("should call deleteObject with object name", () => {
|
||||
const deleteObject = jest.fn()
|
||||
const wrapper = shallow(
|
||||
<PrefixActions
|
||||
object={{ name: "abc/" }}
|
||||
currentPrefix={"pre1/"}
|
||||
deleteObject={deleteObject}
|
||||
/>
|
||||
)
|
||||
wrapper
|
||||
.find("a")
|
||||
.last()
|
||||
.simulate("click", { preventDefault: jest.fn() })
|
||||
wrapper.find("DeleteObjectConfirmModal").prop("deleteObject")()
|
||||
expect(deleteObject).toHaveBeenCalledWith("abc/")
|
||||
})
|
||||
|
||||
|
||||
it("should call downloadPrefix when single object is selected and download button is clicked", () => {
|
||||
const downloadPrefix = jest.fn()
|
||||
const wrapper = shallow(
|
||||
<PrefixActions
|
||||
object={{ name: "abc/" }}
|
||||
currentPrefix={"pre1/"}
|
||||
downloadPrefix={downloadPrefix} />
|
||||
)
|
||||
wrapper
|
||||
.find("a")
|
||||
.first()
|
||||
.simulate("click", { preventDefault: jest.fn() })
|
||||
expect(downloadPrefix).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
@@ -41,4 +41,22 @@ describe("PrefixContainer", () => {
|
||||
wrapper.find("Connect(ObjectItem)").prop("onClick")()
|
||||
expect(selectPrefix).toHaveBeenCalledWith("xyz/abc/")
|
||||
})
|
||||
|
||||
it("should pass actions to ObjectItem", () => {
|
||||
const wrapper = shallow(
|
||||
<PrefixContainer object={{ name: "abc/" }} checkedObjectsCount={0} />
|
||||
)
|
||||
expect(wrapper.find("Connect(ObjectItem)").prop("actionButtons")).not.toBe(
|
||||
undefined
|
||||
)
|
||||
})
|
||||
|
||||
it("should pass empty actions to ObjectItem when checkedObjectCount is more than 0", () => {
|
||||
const wrapper = shallow(
|
||||
<PrefixContainer object={{ name: "abc/" }} checkedObjectsCount={1} />
|
||||
)
|
||||
expect(wrapper.find("Connect(ObjectItem)").prop("actionButtons")).toBe(
|
||||
undefined
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
@@ -34,7 +34,7 @@ describe("ShareObjectModal", () => {
|
||||
shallow(
|
||||
<ShareObjectModal
|
||||
object={{ name: "obj1" }}
|
||||
shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
|
||||
shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
|
||||
/>
|
||||
)
|
||||
})
|
||||
@@ -44,7 +44,7 @@ describe("ShareObjectModal", () => {
|
||||
const wrapper = shallow(
|
||||
<ShareObjectModal
|
||||
object={{ name: "obj1" }}
|
||||
shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
|
||||
shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
|
||||
hideShareObject={hideShareObject}
|
||||
/>
|
||||
)
|
||||
@@ -59,7 +59,7 @@ describe("ShareObjectModal", () => {
|
||||
const wrapper = shallow(
|
||||
<ShareObjectModal
|
||||
object={{ name: "obj1" }}
|
||||
shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
|
||||
shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
|
||||
/>
|
||||
)
|
||||
expect(
|
||||
@@ -76,7 +76,7 @@ describe("ShareObjectModal", () => {
|
||||
const wrapper = shallow(
|
||||
<ShareObjectModal
|
||||
object={{ name: "obj1" }}
|
||||
shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
|
||||
shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
|
||||
hideShareObject={hideShareObject}
|
||||
showCopyAlert={showCopyAlert}
|
||||
/>
|
||||
@@ -89,8 +89,15 @@ describe("ShareObjectModal", () => {
|
||||
describe("Update expiry values", () => {
|
||||
const props = {
|
||||
object: { name: "obj1" },
|
||||
shareObjectDetails: { show: true, object: "obj1", url: "test" }
|
||||
shareObjectDetails: { show: true, object: "obj1", url: "test", showExpiryDate: true }
|
||||
}
|
||||
|
||||
it("should not show expiry values if shared with public link", () => {
|
||||
const shareObjectDetails = { show: true, object: "obj1", url: "test", showExpiryDate: false }
|
||||
const wrapper = shallow(<ShareObjectModal {...props} shareObjectDetails={shareObjectDetails} />)
|
||||
expect(wrapper.find('.set-expire').exists()).toEqual(false)
|
||||
})
|
||||
|
||||
it("should have default expiry values", () => {
|
||||
const wrapper = shallow(<ShareObjectModal {...props} />)
|
||||
expect(wrapper.state("expiry")).toEqual({
|
||||
|
||||
@@ -34,6 +34,7 @@ jest.mock("../../web", () => ({
|
||||
.mockReturnValueOnce(false)
|
||||
.mockReturnValueOnce(true)
|
||||
.mockReturnValueOnce(true)
|
||||
.mockReturnValueOnce(true)
|
||||
.mockReturnValueOnce(false),
|
||||
ListObjects: jest.fn(({ bucketName }) => {
|
||||
if (bucketName === "test-deny") {
|
||||
@@ -70,6 +71,16 @@ jest.mock("../../web", () => ({
|
||||
.mockImplementationOnce(() => {
|
||||
return Promise.resolve({ token: "test" })
|
||||
})
|
||||
.mockImplementationOnce(() => {
|
||||
return Promise.resolve({ token: "test" })
|
||||
}),
|
||||
GetBucketPolicy: jest.fn(({ bucketName, prefix }) => {
|
||||
if (!bucketName) {
|
||||
return Promise.reject({ message: "Invalid bucket" })
|
||||
}
|
||||
if (bucketName === 'test-public') return Promise.resolve({ policy: 'readonly' })
|
||||
return Promise.resolve({})
|
||||
})
|
||||
}))
|
||||
|
||||
const middlewares = [thunk]
|
||||
@@ -295,7 +306,8 @@ describe("Objects actions", () => {
|
||||
type: "objects/SET_SHARE_OBJECT",
|
||||
show: true,
|
||||
object: "b.txt",
|
||||
url: "test"
|
||||
url: "test",
|
||||
showExpiryDate: true
|
||||
}
|
||||
]
|
||||
store.dispatch(actionsObjects.showShareObject("b.txt", "test"))
|
||||
@@ -321,14 +333,16 @@ describe("Objects actions", () => {
|
||||
it("creates objects/SET_SHARE_OBJECT when object is shared", () => {
|
||||
const store = mockStore({
|
||||
buckets: { currentBucket: "bk1" },
|
||||
objects: { currentPrefix: "pre1/" }
|
||||
objects: { currentPrefix: "pre1/" },
|
||||
browser: { serverInfo: {} },
|
||||
})
|
||||
const expectedActions = [
|
||||
{
|
||||
type: "objects/SET_SHARE_OBJECT",
|
||||
show: true,
|
||||
object: "a.txt",
|
||||
url: "https://test.com/bk1/pre1/b.txt"
|
||||
url: "https://test.com/bk1/pre1/b.txt",
|
||||
showExpiryDate: true
|
||||
},
|
||||
{
|
||||
type: "alert/SET",
|
||||
@@ -347,10 +361,42 @@ describe("Objects actions", () => {
|
||||
})
|
||||
})
|
||||
|
||||
it("creates objects/SET_SHARE_OBJECT when object is shared with public link", () => {
|
||||
const store = mockStore({
|
||||
buckets: { currentBucket: "test-public" },
|
||||
objects: { currentPrefix: "pre1/" },
|
||||
browser: { serverInfo: { info: { domains: ['public.com'] }} },
|
||||
})
|
||||
const expectedActions = [
|
||||
{
|
||||
type: "objects/SET_SHARE_OBJECT",
|
||||
show: true,
|
||||
object: "a.txt",
|
||||
url: "public.com/test-public/pre1/a.txt",
|
||||
showExpiryDate: false
|
||||
},
|
||||
{
|
||||
type: "alert/SET",
|
||||
alert: {
|
||||
type: "success",
|
||||
message: "Object shared.",
|
||||
id: alertActions.alertId
|
||||
}
|
||||
}
|
||||
]
|
||||
return store
|
||||
.dispatch(actionsObjects.shareObject("a.txt", 1, 0, 0))
|
||||
.then(() => {
|
||||
const actions = store.getActions()
|
||||
expect(actions).toEqual(expectedActions)
|
||||
})
|
||||
})
|
||||
|
||||
it("creates alert/SET when shareObject is failed", () => {
|
||||
const store = mockStore({
|
||||
buckets: { currentBucket: "" },
|
||||
objects: { currentPrefix: "pre1/" }
|
||||
objects: { currentPrefix: "pre1/" },
|
||||
browser: { serverInfo: {} },
|
||||
})
|
||||
const expectedActions = [
|
||||
{
|
||||
@@ -442,6 +488,34 @@ describe("Objects actions", () => {
|
||||
})
|
||||
})
|
||||
|
||||
it("should download prefix", () => {
|
||||
const open = jest.fn()
|
||||
const send = jest.fn()
|
||||
const xhrMockClass = () => ({
|
||||
open: open,
|
||||
send: send
|
||||
})
|
||||
window.XMLHttpRequest = jest.fn().mockImplementation(xhrMockClass)
|
||||
|
||||
const store = mockStore({
|
||||
buckets: { currentBucket: "bk1" },
|
||||
objects: { currentPrefix: "pre1/" }
|
||||
})
|
||||
return store.dispatch(actionsObjects.downloadPrefix("pre2/")).then(() => {
|
||||
const requestUrl = `${
|
||||
location.origin
|
||||
}${minioBrowserPrefix}/zip?token=test`
|
||||
expect(open).toHaveBeenCalledWith("POST", requestUrl, true)
|
||||
expect(send).toHaveBeenCalledWith(
|
||||
JSON.stringify({
|
||||
bucketName: "bk1",
|
||||
prefix: "pre1/",
|
||||
objects: ["pre2/"]
|
||||
})
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
it("creates objects/CHECKED_LIST_ADD action", () => {
|
||||
const store = mockStore()
|
||||
const expectedActions = [
|
||||
|
||||
@@ -19,20 +19,20 @@ import history from "../history"
|
||||
import {
|
||||
sortObjectsByName,
|
||||
sortObjectsBySize,
|
||||
sortObjectsByDate
|
||||
sortObjectsByDate,
|
||||
} from "../utils"
|
||||
import { getCurrentBucket } from "../buckets/selectors"
|
||||
import { getCurrentPrefix, getCheckedList } from "./selectors"
|
||||
import * as alertActions from "../alert/actions"
|
||||
import * as bucketActions from "../buckets/actions"
|
||||
import {
|
||||
minioBrowserPrefix,
|
||||
SORT_BY_NAME,
|
||||
SORT_BY_SIZE,
|
||||
SORT_BY_LAST_MODIFIED,
|
||||
SORT_ORDER_ASC,
|
||||
SORT_ORDER_DESC
|
||||
SORT_ORDER_DESC,
|
||||
} from "../constants"
|
||||
import { getServerInfo, hasServerPublicDomain } from '../browser/selectors'
|
||||
|
||||
export const SET_LIST = "objects/SET_LIST"
|
||||
export const RESET_LIST = "objects/RESET_LIST"
|
||||
@@ -48,35 +48,35 @@ export const CHECKED_LIST_REMOVE = "objects/CHECKED_LIST_REMOVE"
|
||||
export const CHECKED_LIST_RESET = "objects/CHECKED_LIST_RESET"
|
||||
export const SET_LIST_LOADING = "objects/SET_LIST_LOADING"
|
||||
|
||||
export const setList = objects => ({
|
||||
export const setList = (objects) => ({
|
||||
type: SET_LIST,
|
||||
objects
|
||||
objects,
|
||||
})
|
||||
|
||||
export const resetList = () => ({
|
||||
type: RESET_LIST
|
||||
type: RESET_LIST,
|
||||
})
|
||||
|
||||
export const setListLoading = listLoading => ({
|
||||
export const setListLoading = (listLoading) => ({
|
||||
type: SET_LIST_LOADING,
|
||||
listLoading
|
||||
listLoading,
|
||||
})
|
||||
|
||||
export const fetchObjects = () => {
|
||||
return function(dispatch, getState) {
|
||||
return function (dispatch, getState) {
|
||||
dispatch(resetList())
|
||||
const {
|
||||
buckets: { currentBucket },
|
||||
objects: { currentPrefix }
|
||||
objects: { currentPrefix },
|
||||
} = getState()
|
||||
if (currentBucket) {
|
||||
dispatch(setListLoading(true))
|
||||
return web
|
||||
.ListObjects({
|
||||
bucketName: currentBucket,
|
||||
prefix: currentPrefix
|
||||
prefix: currentPrefix,
|
||||
})
|
||||
.then(res => {
|
||||
.then((res) => {
|
||||
// we need to check if the bucket name and prefix are the same as
|
||||
// when the request was made before updating the displayed objects
|
||||
if (
|
||||
@@ -85,10 +85,10 @@ export const fetchObjects = () => {
|
||||
) {
|
||||
let objects = []
|
||||
if (res.objects) {
|
||||
objects = res.objects.map(object => {
|
||||
objects = res.objects.map((object) => {
|
||||
return {
|
||||
...object,
|
||||
name: object.name.replace(currentPrefix, "")
|
||||
name: object.name.replace(currentPrefix, ""),
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -104,13 +104,13 @@ export const fetchObjects = () => {
|
||||
dispatch(setListLoading(false))
|
||||
}
|
||||
})
|
||||
.catch(err => {
|
||||
.catch((err) => {
|
||||
if (web.LoggedIn()) {
|
||||
dispatch(
|
||||
alertActions.set({
|
||||
type: "danger",
|
||||
message: err.message,
|
||||
autoClear: true
|
||||
autoClear: true,
|
||||
})
|
||||
)
|
||||
dispatch(resetList())
|
||||
@@ -123,8 +123,8 @@ export const fetchObjects = () => {
|
||||
}
|
||||
}
|
||||
|
||||
export const sortObjects = sortBy => {
|
||||
return function(dispatch, getState) {
|
||||
export const sortObjects = (sortBy) => {
|
||||
return function (dispatch, getState) {
|
||||
const { objects } = getState()
|
||||
let sortOrder = SORT_ORDER_ASC
|
||||
// Reverse sort order if the list is already sorted on same field
|
||||
@@ -149,18 +149,18 @@ const sortObjectsList = (list, sortBy, sortOrder) => {
|
||||
}
|
||||
}
|
||||
|
||||
export const setSortBy = sortBy => ({
|
||||
export const setSortBy = (sortBy) => ({
|
||||
type: SET_SORT_BY,
|
||||
sortBy
|
||||
sortBy,
|
||||
})
|
||||
|
||||
export const setSortOrder = sortOrder => ({
|
||||
export const setSortOrder = (sortOrder) => ({
|
||||
type: SET_SORT_ORDER,
|
||||
sortOrder
|
||||
sortOrder,
|
||||
})
|
||||
|
||||
export const selectPrefix = prefix => {
|
||||
return function(dispatch, getState) {
|
||||
export const selectPrefix = (prefix) => {
|
||||
return function (dispatch, getState) {
|
||||
dispatch(setCurrentPrefix(prefix))
|
||||
dispatch(fetchObjects())
|
||||
dispatch(resetCheckedList())
|
||||
@@ -169,49 +169,49 @@ export const selectPrefix = prefix => {
|
||||
}
|
||||
}
|
||||
|
||||
export const setCurrentPrefix = prefix => {
|
||||
export const setCurrentPrefix = (prefix) => {
|
||||
return {
|
||||
type: SET_CURRENT_PREFIX,
|
||||
prefix
|
||||
prefix,
|
||||
}
|
||||
}
|
||||
|
||||
export const setPrefixWritable = prefixWritable => ({
|
||||
export const setPrefixWritable = (prefixWritable) => ({
|
||||
type: SET_PREFIX_WRITABLE,
|
||||
prefixWritable
|
||||
prefixWritable,
|
||||
})
|
||||
|
||||
export const deleteObject = object => {
|
||||
return function(dispatch, getState) {
|
||||
export const deleteObject = (object) => {
|
||||
return function (dispatch, getState) {
|
||||
const currentBucket = getCurrentBucket(getState())
|
||||
const currentPrefix = getCurrentPrefix(getState())
|
||||
const objectName = `${currentPrefix}${object}`
|
||||
return web
|
||||
.RemoveObject({
|
||||
bucketName: currentBucket,
|
||||
objects: [objectName]
|
||||
objects: [objectName],
|
||||
})
|
||||
.then(() => {
|
||||
dispatch(removeObject(object))
|
||||
})
|
||||
.catch(e => {
|
||||
.catch((e) => {
|
||||
dispatch(
|
||||
alertActions.set({
|
||||
type: "danger",
|
||||
message: e.message
|
||||
message: e.message,
|
||||
})
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
export const removeObject = object => ({
|
||||
export const removeObject = (object) => ({
|
||||
type: REMOVE,
|
||||
object
|
||||
object,
|
||||
})
|
||||
|
||||
export const deleteCheckedObjects = () => {
|
||||
return function(dispatch, getState) {
|
||||
return function (dispatch, getState) {
|
||||
const checkedObjects = getCheckedList(getState())
|
||||
for (let i = 0; i < checkedObjects.length; i++) {
|
||||
dispatch(deleteObject(checkedObjects[i]))
|
||||
@@ -221,33 +221,52 @@ export const deleteCheckedObjects = () => {
|
||||
}
|
||||
|
||||
export const shareObject = (object, days, hours, minutes) => {
|
||||
return function(dispatch, getState) {
|
||||
return function (dispatch, getState) {
|
||||
const hasServerDomain = hasServerPublicDomain(getState())
|
||||
const currentBucket = getCurrentBucket(getState())
|
||||
const currentPrefix = getCurrentPrefix(getState())
|
||||
const objectName = `${currentPrefix}${object}`
|
||||
const expiry = days * 24 * 60 * 60 + hours * 60 * 60 + minutes * 60
|
||||
if (web.LoggedIn()) {
|
||||
return web
|
||||
.PresignedGet({
|
||||
host: location.host,
|
||||
bucket: currentBucket,
|
||||
object: objectName,
|
||||
expiry: expiry
|
||||
.GetBucketPolicy({ bucketName: currentBucket, prefix: currentPrefix })
|
||||
.catch(() => ({ policy: null }))
|
||||
.then(({ policy }) => {
|
||||
if (hasServerDomain && ['readonly', 'readwrite'].includes(policy)) {
|
||||
const domain = getServerInfo(getState()).info.domains[0]
|
||||
const url = `${domain}/${currentBucket}/${encodeURI(objectName)}`
|
||||
dispatch(showShareObject(object, url, false))
|
||||
dispatch(
|
||||
alertActions.set({
|
||||
type: "success",
|
||||
message: "Object shared."
|
||||
})
|
||||
)
|
||||
} else {
|
||||
return web
|
||||
.PresignedGet({
|
||||
host: location.host,
|
||||
bucket: currentBucket,
|
||||
object: objectName,
|
||||
expiry: expiry
|
||||
})
|
||||
}
|
||||
})
|
||||
.then(obj => {
|
||||
.then((obj) => {
|
||||
if (!obj) return
|
||||
dispatch(showShareObject(object, obj.url))
|
||||
dispatch(
|
||||
alertActions.set({
|
||||
type: "success",
|
||||
message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`
|
||||
message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`,
|
||||
})
|
||||
)
|
||||
})
|
||||
.catch(err => {
|
||||
.catch((err) => {
|
||||
dispatch(
|
||||
alertActions.set({
|
||||
type: "danger",
|
||||
message: err.message
|
||||
message: err.message,
|
||||
})
|
||||
)
|
||||
})
|
||||
@@ -265,29 +284,29 @@ export const shareObject = (object, days, hours, minutes) => {
|
||||
dispatch(
|
||||
alertActions.set({
|
||||
type: "success",
|
||||
message: `Object shared.`
|
||||
message: `Object shared.`,
|
||||
})
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const showShareObject = (object, url) => ({
|
||||
export const showShareObject = (object, url, showExpiryDate = true) => ({
|
||||
type: SET_SHARE_OBJECT,
|
||||
show: true,
|
||||
object,
|
||||
url
|
||||
url,
|
||||
showExpiryDate,
|
||||
})
|
||||
|
||||
export const hideShareObject = (object, url) => ({
|
||||
type: SET_SHARE_OBJECT,
|
||||
show: false,
|
||||
object: "",
|
||||
url: ""
|
||||
url: "",
|
||||
})
|
||||
|
||||
export const downloadObject = object => {
|
||||
return function(dispatch, getState) {
|
||||
export const getObjectURL = (object, callback) => {
|
||||
return function (dispatch, getState) {
|
||||
const currentBucket = getCurrentBucket(getState())
|
||||
const currentPrefix = getCurrentPrefix(getState())
|
||||
const objectName = `${currentPrefix}${object}`
|
||||
@@ -295,78 +314,119 @@ export const downloadObject = object => {
|
||||
if (web.LoggedIn()) {
|
||||
return web
|
||||
.CreateURLToken()
|
||||
.then(res => {
|
||||
const url = `${
|
||||
window.location.origin
|
||||
}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${
|
||||
res.token
|
||||
}`
|
||||
window.location = url
|
||||
.then((res) => {
|
||||
const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${res.token}`
|
||||
callback(url)
|
||||
})
|
||||
.catch(err => {
|
||||
.catch((err) => {
|
||||
dispatch(
|
||||
alertActions.set({
|
||||
type: "danger",
|
||||
message: err.message
|
||||
message: err.message,
|
||||
})
|
||||
)
|
||||
})
|
||||
} else {
|
||||
const url = `${
|
||||
window.location.origin
|
||||
}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
|
||||
const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
|
||||
callback(url)
|
||||
}
|
||||
}
|
||||
}
|
||||
export const downloadObject = (object) => {
|
||||
return function (dispatch, getState) {
|
||||
const currentBucket = getCurrentBucket(getState())
|
||||
const currentPrefix = getCurrentPrefix(getState())
|
||||
const objectName = `${currentPrefix}${object}`
|
||||
const encObjectName = encodeURI(objectName)
|
||||
if (web.LoggedIn()) {
|
||||
return web
|
||||
.CreateURLToken()
|
||||
.then((res) => {
|
||||
const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${res.token}`
|
||||
window.location = url
|
||||
})
|
||||
.catch((err) => {
|
||||
dispatch(
|
||||
alertActions.set({
|
||||
type: "danger",
|
||||
message: err.message,
|
||||
})
|
||||
)
|
||||
})
|
||||
} else {
|
||||
const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
|
||||
window.location = url
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const checkObject = object => ({
|
||||
type: CHECKED_LIST_ADD,
|
||||
object
|
||||
})
|
||||
|
||||
export const uncheckObject = object => ({
|
||||
type: CHECKED_LIST_REMOVE,
|
||||
object
|
||||
})
|
||||
|
||||
export const resetCheckedList = () => ({
|
||||
type: CHECKED_LIST_RESET
|
||||
})
|
||||
|
||||
export const downloadCheckedObjects = () => {
|
||||
return function(dispatch, getState) {
|
||||
const state = getState()
|
||||
const req = {
|
||||
bucketName: getCurrentBucket(state),
|
||||
prefix: getCurrentPrefix(state),
|
||||
objects: getCheckedList(state)
|
||||
}
|
||||
if (!web.LoggedIn()) {
|
||||
const requestUrl = location.origin + "/minio/zip?token="
|
||||
downloadZip(requestUrl, req, dispatch)
|
||||
} else {
|
||||
return web
|
||||
.CreateURLToken()
|
||||
.then(res => {
|
||||
const requestUrl = `${
|
||||
location.origin
|
||||
}${minioBrowserPrefix}/zip?token=${res.token}`
|
||||
downloadZip(requestUrl, req, dispatch)
|
||||
})
|
||||
.catch(err =>
|
||||
dispatch(
|
||||
alertActions.set({
|
||||
type: "danger",
|
||||
message: err.message
|
||||
})
|
||||
)
|
||||
)
|
||||
}
|
||||
export const downloadPrefix = (object) => {
|
||||
return function (dispatch, getState) {
|
||||
return downloadObjects(
|
||||
getCurrentBucket(getState()),
|
||||
getCurrentPrefix(getState()),
|
||||
[object],
|
||||
`${object.slice(0, -1)}.zip`,
|
||||
dispatch
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
const downloadZip = (url, req, dispatch) => {
|
||||
|
||||
export const checkObject = (object) => ({
|
||||
type: CHECKED_LIST_ADD,
|
||||
object,
|
||||
})
|
||||
|
||||
export const uncheckObject = (object) => ({
|
||||
type: CHECKED_LIST_REMOVE,
|
||||
object,
|
||||
})
|
||||
|
||||
export const resetCheckedList = () => ({
|
||||
type: CHECKED_LIST_RESET,
|
||||
})
|
||||
|
||||
export const downloadCheckedObjects = () => {
|
||||
return function (dispatch, getState) {
|
||||
return downloadObjects(
|
||||
getCurrentBucket(getState()),
|
||||
getCurrentPrefix(getState()),
|
||||
getCheckedList(getState()),
|
||||
null,
|
||||
dispatch
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
const downloadObjects = (bucketName, prefix, objects, filename, dispatch) => {
|
||||
const req = {
|
||||
bucketName: bucketName,
|
||||
prefix: prefix,
|
||||
objects: objects,
|
||||
}
|
||||
if (web.LoggedIn()) {
|
||||
return web
|
||||
.CreateURLToken()
|
||||
.then((res) => {
|
||||
const requestUrl = `${location.origin}${minioBrowserPrefix}/zip?token=${res.token}`
|
||||
downloadZip(requestUrl, req, filename, dispatch)
|
||||
})
|
||||
.catch((err) =>
|
||||
dispatch(
|
||||
alertActions.set({
|
||||
type: "danger",
|
||||
message: err.message,
|
||||
})
|
||||
)
|
||||
)
|
||||
} else {
|
||||
const requestUrl = `${location.origin}${minioBrowserPrefix}/zip?token=`
|
||||
downloadZip(requestUrl, req, filename, dispatch)
|
||||
}
|
||||
}
|
||||
|
||||
const downloadZip = (url, req, filename, dispatch) => {
|
||||
var anchor = document.createElement("a")
|
||||
document.body.appendChild(anchor)
|
||||
|
||||
@@ -374,17 +434,17 @@ const downloadZip = (url, req, dispatch) => {
|
||||
xhr.open("POST", url, true)
|
||||
xhr.responseType = "blob"
|
||||
|
||||
xhr.onload = function(e) {
|
||||
xhr.onload = function (e) {
|
||||
if (this.status == 200) {
|
||||
dispatch(resetCheckedList())
|
||||
var blob = new Blob([this.response], {
|
||||
type: "octet/stream"
|
||||
type: "octet/stream",
|
||||
})
|
||||
var blobUrl = window.URL.createObjectURL(blob)
|
||||
var separator = req.prefix.length > 1 ? "-" : ""
|
||||
|
||||
anchor.href = blobUrl
|
||||
anchor.download =
|
||||
anchor.download = filename ||
|
||||
req.bucketName + separator + req.prefix.slice(0, -1) + ".zip"
|
||||
|
||||
anchor.click()
|
||||
|
||||
@@ -89,7 +89,8 @@ export default (
|
||||
shareObject: {
|
||||
show: action.show,
|
||||
object: action.object,
|
||||
url: action.url
|
||||
url: action.url,
|
||||
showExpiryDate: action.showExpiryDate
|
||||
}
|
||||
}
|
||||
case actionsObjects.CHECKED_LIST_ADD:
|
||||
|
||||
@@ -107,3 +107,13 @@ export const getRandomSecretKey = () => {
|
||||
const base64Str = btoa(binStr)
|
||||
return base64Str.replace(/\//g, "+").substr(0, 40)
|
||||
}
|
||||
|
||||
export const getRandomString = length => {
|
||||
var text = ""
|
||||
var possible =
|
||||
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
|
||||
for (var i = 0; i < length; i++) {
|
||||
text += possible.charAt(Math.floor(Math.random() * possible.length))
|
||||
}
|
||||
return text
|
||||
}
|
||||
|
||||
@@ -75,6 +75,16 @@ class Web {
|
||||
GetToken() {
|
||||
return storage.getItem('token')
|
||||
}
|
||||
GetDiscoveryDoc() {
|
||||
return this.makeCall("GetDiscoveryDoc")
|
||||
}
|
||||
LoginSTS(args) {
|
||||
return this.makeCall('LoginSTS', args)
|
||||
.then(res => {
|
||||
storage.setItem('token', `${res.token}`)
|
||||
return res
|
||||
})
|
||||
}
|
||||
ServerInfo() {
|
||||
return this.makeCall('ServerInfo')
|
||||
}
|
||||
@@ -125,4 +135,4 @@ class Web {
|
||||
|
||||
const web = new Web(`${window.location.protocol}//${window.location.host}${minioBrowserPrefix}/webrpc`);
|
||||
|
||||
export default web;
|
||||
export default web;
|
||||
|
||||
@@ -95,6 +95,41 @@
|
||||
}
|
||||
}
|
||||
|
||||
.openid-login {
|
||||
margin-top: 30px;
|
||||
}
|
||||
|
||||
.openid-btn {
|
||||
display: inline-block;
|
||||
color: @link-color;
|
||||
margin-top: 30px;
|
||||
border-width: 1px;
|
||||
border-style: solid;
|
||||
opacity: 0.6;
|
||||
font-size: 14px;
|
||||
&:hover {
|
||||
opacity: 1;
|
||||
cursor: pointer;
|
||||
}
|
||||
}
|
||||
|
||||
.or {
|
||||
display:flex;
|
||||
justify-content:center;
|
||||
align-items: center;
|
||||
color:grey;
|
||||
}
|
||||
|
||||
.or:after,
|
||||
.or:before {
|
||||
content: "";
|
||||
display: block;
|
||||
background: grey;
|
||||
width: 10px;
|
||||
height: 1px;
|
||||
margin: 0 10px;
|
||||
}
|
||||
|
||||
/*------------------------------
|
||||
Chrome autofill fix
|
||||
-------------------------------*/
|
||||
@@ -102,4 +137,4 @@ input:-webkit-autofill {
|
||||
-webkit-box-shadow:0 0 0 50px #002a37 inset !important;
|
||||
-webkit-text-fill-color: @white !important;
|
||||
caret-color: white;
|
||||
}
|
||||
}
|
||||
|
||||
10793
browser/package-lock.json
generated
10793
browser/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -28,69 +28,70 @@
|
||||
},
|
||||
"homepage": "https://github.com/minio/minio",
|
||||
"devDependencies": {
|
||||
"async": "^1.5.2",
|
||||
"async": "^3.2.0",
|
||||
"babel-cli": "^6.26.0",
|
||||
"babel-core": "^6.26.3",
|
||||
"babel-jest": "^22.1.0",
|
||||
"babel-jest": "^23.6.0",
|
||||
"babel-loader": "^7.1.2",
|
||||
"babel-plugin-syntax-object-rest-spread": "^6.13.0",
|
||||
"babel-plugin-transform-object-rest-spread": "^6.8.0",
|
||||
"babel-polyfill": "^6.23.0",
|
||||
"babel-plugin-transform-object-rest-spread": "^6.26.0",
|
||||
"babel-polyfill": "^6.26.0",
|
||||
"babel-preset-es2015": "^6.14.0",
|
||||
"babel-preset-react": "^6.11.1",
|
||||
"babel-register": "^6.26.0",
|
||||
"copy-webpack-plugin": "^4.6.0",
|
||||
"css-loader": "^0.23.1",
|
||||
"enzyme": "^3.10.0",
|
||||
"enzyme-adapter-react-16": "^1.1.1",
|
||||
"esformatter": "^0.10.0",
|
||||
"esformatter-jsx": "^7.4.1",
|
||||
"copy-webpack-plugin": "^6.0.1",
|
||||
"css-loader": "^3.5.3",
|
||||
"enzyme": "^3.11.0",
|
||||
"enzyme-adapter-react-16": "^1.15.2",
|
||||
"esformatter": "^0.11.3",
|
||||
"esformatter-jsx": "^8.0.1",
|
||||
"esformatter-jsx-ignore": "^1.0.6",
|
||||
"html-webpack-plugin": "^3.2.0",
|
||||
"jest": "^22.1.4",
|
||||
"jest-enzyme": "^4.0.2",
|
||||
"json-loader": "^0.5.4",
|
||||
"less": "^3.9.0",
|
||||
"less-loader": "^4.1.0",
|
||||
"purgecss-webpack-plugin": "^1.4.0",
|
||||
"style-loader": "^0.13.1",
|
||||
"url-loader": "^0.5.7",
|
||||
"webpack-cli": "^3.2.0",
|
||||
"webpack-dev-server": "^3.1.14"
|
||||
"html-webpack-plugin": "^4.3.0",
|
||||
"jest": "^23.6.0",
|
||||
"jest-enzyme": "^7.1.2",
|
||||
"json-loader": "^0.5.7",
|
||||
"less": "^3.11.1",
|
||||
"less-loader": "^6.1.0",
|
||||
"purgecss-webpack-plugin": "^2.2.0",
|
||||
"style-loader": "^1.2.1",
|
||||
"url-loader": "^4.1.0",
|
||||
"webpack-cli": "^3.3.11",
|
||||
"webpack-dev-server": "^3.11.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"@fortawesome/fontawesome-free": "^5.10.0",
|
||||
"bootstrap": "^3.3.6",
|
||||
"classnames": "^2.2.3",
|
||||
"expect": "^1.20.2",
|
||||
"glob-all": "^3.1.0",
|
||||
"history": "^4.7.2",
|
||||
"@fortawesome/fontawesome-free": "^5.13.0",
|
||||
"bootstrap": "^3.4.1",
|
||||
"classnames": "^2.2.6",
|
||||
"core-js": "^3.6.5",
|
||||
"expect": "^26.0.1",
|
||||
"glob-all": "^3.2.1",
|
||||
"history": "^4.10.1",
|
||||
"humanize": "0.0.9",
|
||||
"identity-obj-proxy": "^3.0.0",
|
||||
"json-loader": "^0.5.4",
|
||||
"jwt-decode": "^2.2.0",
|
||||
"local-storage-fallback": "^4.0.2",
|
||||
"local-storage-fallback": "^4.1.1",
|
||||
"material-design-iconic-font": "^2.2.0",
|
||||
"mime-db": "^1.25.0",
|
||||
"mime-types": "^2.1.13",
|
||||
"moment": "^2.24.0",
|
||||
"react": "^16.2.0",
|
||||
"react-addons-test-utils": "^0.14.8",
|
||||
"react-bootstrap": "^0.32.0",
|
||||
"react-copy-to-clipboard": "^5.0.1",
|
||||
"mime-db": "^1.44.0",
|
||||
"mime-types": "^2.1.27",
|
||||
"moment": "^2.26.0",
|
||||
"query-string": "^6.12.1",
|
||||
"react": "^16.13.1",
|
||||
"react-addons-test-utils": "^15.6.2",
|
||||
"react-bootstrap": "^0.32.4",
|
||||
"react-copy-to-clipboard": "^5.0.2",
|
||||
"react-custom-scrollbars": "^4.2.1",
|
||||
"react-dom": "^16.2.0",
|
||||
"react-dropzone": "^4.2.3",
|
||||
"react-infinite-scroller": "^1.0.6",
|
||||
"react-dom": "^16.13.1",
|
||||
"react-dropzone": "^4.3.0",
|
||||
"react-infinite-scroller": "^1.2.4",
|
||||
"react-onclickout": "^2.0.8",
|
||||
"react-redux": "^5.0.6",
|
||||
"react-router-dom": "^4.2.0",
|
||||
"redux": "^3.7.2",
|
||||
"redux-mock-store": "^1.5.1",
|
||||
"redux-thunk": "^2.2.0",
|
||||
"reselect": "^3.0.1",
|
||||
"superagent": "^3.8.2",
|
||||
"react-redux": "^5.1.2",
|
||||
"react-router-dom": "^5.2.0",
|
||||
"redux": "^4.0.5",
|
||||
"redux-mock-store": "^1.5.4",
|
||||
"redux-thunk": "^2.3.0",
|
||||
"reselect": "^4.0.0",
|
||||
"superagent": "^5.2.2",
|
||||
"superagent-es6-promise": "^1.0.0",
|
||||
"webpack": "^4.28.3"
|
||||
"webpack": "^4.43.0"
|
||||
}
|
||||
}
|
||||
|
||||
1
browser/staticcheck.conf
Normal file
1
browser/staticcheck.conf
Normal file
@@ -0,0 +1 @@
|
||||
checks = ["all", "-ST1005", "-ST1000", "-SA4000", "-SA9004", "-SA1019", "-SA1008", "-U1000", "-ST1003", "-ST1018"]
|
||||
File diff suppressed because one or more lines are too long
@@ -73,16 +73,17 @@ var exports = {
|
||||
},
|
||||
proxy: {
|
||||
'/minio/webrpc': {
|
||||
target: 'http://localhost:9000',
|
||||
secure: false
|
||||
target: 'http://localhost:9000',
|
||||
secure: false,
|
||||
headers: {'Host': "localhost:9000"}
|
||||
},
|
||||
'/minio/upload/*': {
|
||||
target: 'http://localhost:9000',
|
||||
secure: false
|
||||
target: 'http://localhost:9000',
|
||||
secure: false
|
||||
},
|
||||
'/minio/download/*': {
|
||||
target: 'http://localhost:9000',
|
||||
secure: false
|
||||
target: 'http://localhost:9000',
|
||||
secure: false
|
||||
},
|
||||
'/minio/zip': {
|
||||
target: 'http://localhost:9000',
|
||||
@@ -91,7 +92,7 @@ var exports = {
|
||||
}
|
||||
},
|
||||
plugins: [
|
||||
new CopyWebpackPlugin([
|
||||
new CopyWebpackPlugin({patterns: [
|
||||
{from: 'app/css/loader.css'},
|
||||
{from: 'app/img/browsers/chrome.png'},
|
||||
{from: 'app/img/browsers/firefox.png'},
|
||||
@@ -101,7 +102,7 @@ var exports = {
|
||||
{from: 'app/img/favicon/favicon-32x32.png'},
|
||||
{from: 'app/img/favicon/favicon-96x96.png'},
|
||||
{from: 'app/index.html'}
|
||||
]),
|
||||
]}),
|
||||
new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
|
||||
new PurgecssPlugin({
|
||||
paths: glob.sync([
|
||||
|
||||
@@ -67,7 +67,7 @@ var exports = {
|
||||
fs:'empty'
|
||||
},
|
||||
plugins: [
|
||||
new CopyWebpackPlugin([
|
||||
new CopyWebpackPlugin({patterns: [
|
||||
{from: 'app/css/loader.css'},
|
||||
{from: 'app/img/browsers/chrome.png'},
|
||||
{from: 'app/img/browsers/firefox.png'},
|
||||
@@ -77,7 +77,7 @@ var exports = {
|
||||
{from: 'app/img/favicon/favicon-32x32.png'},
|
||||
{from: 'app/img/favicon/favicon-96x96.png'},
|
||||
{from: 'app/index.html'}
|
||||
]),
|
||||
]}),
|
||||
new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
|
||||
new PurgecssPlugin({
|
||||
paths: glob.sync([
|
||||
|
||||
9009
browser/yarn.lock
9009
browser/yarn.lock
File diff suppressed because it is too large
Load Diff
@@ -1,67 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# usage: ./benchcmp.sh <commit-sha1> <commit-sha2>
|
||||
# Exit on any non zero return value on execution of a command.
|
||||
set -e
|
||||
|
||||
# path of benchcmp.
|
||||
benchcmp=${GOPATH}/bin/benchcmp
|
||||
|
||||
# function which runs the benchmark comparison.
|
||||
RunBenchCmp () {
|
||||
# Path for storing output of benchmark at commit 1.
|
||||
commit1Bench=/tmp/minio-$1.bench
|
||||
# Path for storing output of benchmark at commit 2.
|
||||
commit2Bench=/tmp/minio-$2.bench
|
||||
# switch to commit $1.
|
||||
git checkout $1
|
||||
# Check if the benchmark results for given commit 1 already exists.
|
||||
# Benchmarks are time/resource consuming operations, run only if the the results doesn't exist.
|
||||
if [[ ! -f $commit1Bench ]]
|
||||
then
|
||||
|
||||
echo "Running benchmarks at $1"
|
||||
go test -run=NONE -bench=. | tee $commit1Bench
|
||||
fi
|
||||
# get back to the commit from which it was started.
|
||||
git checkout -
|
||||
echo "Checking into commit $2"
|
||||
# switch to commit $2
|
||||
git checkout $2
|
||||
# Check if the benchmark results for given commit 2 already exists.
|
||||
# Benchmarks are time/resource consuming operations, run only if the the results doesn't exist.
|
||||
if [[ ! -f $commit2Bench ]]
|
||||
then
|
||||
# Running benchmarks at $2.
|
||||
echo "Running benchmarks at $2"
|
||||
go test -run=NONE -bench=. | tee $commit2Bench
|
||||
fi
|
||||
|
||||
# get back to the commit from which it was started.
|
||||
git checkout -
|
||||
# Comparing the benchmarks.
|
||||
echo "Running benchmark comparison between $1 and $2 ..."
|
||||
$benchcmp $commit1Bench $commit2Bench
|
||||
echo "Done."
|
||||
}
|
||||
|
||||
# check if 2 commit SHA's of snapshots of code for which benchmp has to be done is provided.
|
||||
if [ ! $# -eq 2 ]
|
||||
then
|
||||
# exit if commit SHA's are not provided.
|
||||
echo $#
|
||||
echo "Need Commit SHA's of 2 snapshots to be supplied to run benchmark comparison."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# check if benchcmp exists.
|
||||
if [[ -x "$benchcmp" ]]
|
||||
then
|
||||
RunBenchCmp $1 $2
|
||||
else
|
||||
# install benchcmp if doesnt't exist.
|
||||
echo "fetching Benchcmp..."
|
||||
go get -u golang.org/x/tools/cmd/benchcmp
|
||||
echo "Done."
|
||||
RunBenchCmp $1 $2
|
||||
fi
|
||||
@@ -1,14 +1,15 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
# Enable tracing if set.
|
||||
[ -n "$BASH_XTRACEFD" ] && set -ex
|
||||
[ -n "$BASH_XTRACEFD" ] && set -x
|
||||
|
||||
function _init() {
|
||||
## All binaries are static make sure to disable CGO.
|
||||
export CGO_ENABLED=0
|
||||
|
||||
## List of architectures and OS to test coss compilation.
|
||||
SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64"
|
||||
SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386"
|
||||
}
|
||||
|
||||
function _build() {
|
||||
@@ -19,11 +20,11 @@ function _build() {
|
||||
package=$(go list -f '{{.ImportPath}}')
|
||||
printf -- "--> %15s:%s\n" "${osarch}" "${package}"
|
||||
|
||||
# Go build to build the binary.
|
||||
# go build -trimpath to build the binary.
|
||||
export GOOS=$os
|
||||
export GOARCH=$arch
|
||||
export GO111MODULE=on
|
||||
go build -tags kqueue -o /dev/null
|
||||
go build -trimpath -tags kqueue -o /dev/null
|
||||
}
|
||||
|
||||
function main() {
|
||||
|
||||
@@ -24,7 +24,7 @@ function start_minio_server()
|
||||
MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 \
|
||||
minio --quiet --json server /data --address 127.0.0.1:24242 > server.log 2>&1 &
|
||||
server_pid=$!
|
||||
sleep 3
|
||||
sleep 10
|
||||
|
||||
echo "$server_pid"
|
||||
}
|
||||
@@ -35,7 +35,7 @@ function start_minio_gateway_s3()
|
||||
minio --quiet --json gateway s3 http://127.0.0.1:24242 \
|
||||
--address 127.0.0.1:24240 > gateway.log 2>&1 &
|
||||
gw_pid=$!
|
||||
sleep 3
|
||||
sleep 10
|
||||
|
||||
echo "$gw_pid"
|
||||
}
|
||||
@@ -46,9 +46,10 @@ function main()
|
||||
gw_pid="$(start_minio_gateway_s3)"
|
||||
|
||||
SERVER_ENDPOINT=127.0.0.1:24240 ENABLE_HTTPS=0 ACCESS_KEY=minio \
|
||||
SECRET_KEY=minio123 MINT_MODE="full" /mint/entrypoint.sh aws-sdk-go \
|
||||
aws-sdk-java aws-sdk-php aws-sdk-ruby awscli healthcheck minio-dotnet \
|
||||
minio-go minio-java minio-js minio-py
|
||||
SECRET_KEY=minio123 MINT_MODE="full" /mint/entrypoint.sh \
|
||||
aws-sdk-go aws-sdk-java aws-sdk-php aws-sdk-ruby awscli \
|
||||
healthcheck mc minio-dotnet minio-js \
|
||||
minio-py s3cmd s3select security
|
||||
rv=$?
|
||||
|
||||
kill "$sr_pid"
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
GO111MODULE=on CGO_ENABLED=0 go test -v -coverprofile=coverage.txt -covermode=atomic ./...
|
||||
7
buildscripts/race.sh
Executable file
7
buildscripts/race.sh
Executable file
@@ -0,0 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
for d in $(go list ./... | grep -v browser); do
|
||||
CGO_ENABLED=1 go test -v -race --timeout 50m "$d"
|
||||
done
|
||||
@@ -69,63 +69,35 @@ function start_minio_erasure_sets()
|
||||
echo "$minio_pid"
|
||||
}
|
||||
|
||||
function start_minio_dist_erasure_sets_ipv6()
|
||||
function start_minio_zone_erasure_sets()
|
||||
{
|
||||
declare -a minio_pids
|
||||
export MINIO_ACCESS_KEY=$ACCESS_KEY
|
||||
export MINIO_SECRET_KEY=$SECRET_KEY
|
||||
"${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9000.log" 2>&1 &
|
||||
minio_pids[0]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9001.log" 2>&1 &
|
||||
minio_pids[1]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9002" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9002.log" 2>&1 &
|
||||
minio_pids[2]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9003" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9003.log" 2>&1 &
|
||||
minio_pids[3]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9004" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9004.log" 2>&1 &
|
||||
minio_pids[4]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9005" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9005.log" 2>&1 &
|
||||
minio_pids[5]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9006" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9006.log" 2>&1 &
|
||||
minio_pids[6]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9007" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9007.log" 2>&1 &
|
||||
minio_pids[7]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9008" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9008.log" 2>&1 &
|
||||
minio_pids[8]=$!
|
||||
"${MINIO[@]}" server --address="[::1]:9009" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9009.log" 2>&1 &
|
||||
minio_pids[9]=$!
|
||||
|
||||
sleep 35
|
||||
"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
|
||||
minio_pids[0]=$!
|
||||
|
||||
"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
|
||||
minio_pids[1]=$!
|
||||
|
||||
sleep 40
|
||||
echo "${minio_pids[@]}"
|
||||
}
|
||||
|
||||
function start_minio_dist_erasure_sets()
|
||||
function start_minio_zone_erasure_sets_ipv6()
|
||||
{
|
||||
declare -a minio_pids
|
||||
export MINIO_ACCESS_KEY=$ACCESS_KEY
|
||||
export MINIO_SECRET_KEY=$SECRET_KEY
|
||||
"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
|
||||
minio_pids[0]=$!
|
||||
"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
|
||||
minio_pids[1]=$!
|
||||
"${MINIO[@]}" server --address=:9002 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9002.log" 2>&1 &
|
||||
minio_pids[2]=$!
|
||||
"${MINIO[@]}" server --address=:9003 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9003.log" 2>&1 &
|
||||
minio_pids[3]=$!
|
||||
"${MINIO[@]}" server --address=:9004 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9004.log" 2>&1 &
|
||||
minio_pids[4]=$!
|
||||
"${MINIO[@]}" server --address=:9005 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9005.log" 2>&1 &
|
||||
minio_pids[5]=$!
|
||||
"${MINIO[@]}" server --address=:9006 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9006.log" 2>&1 &
|
||||
minio_pids[6]=$!
|
||||
"${MINIO[@]}" server --address=:9007 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9007.log" 2>&1 &
|
||||
minio_pids[7]=$!
|
||||
"${MINIO[@]}" server --address=:9008 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9008.log" 2>&1 &
|
||||
minio_pids[8]=$!
|
||||
"${MINIO[@]}" server --address=:9009 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9009.log" 2>&1 &
|
||||
minio_pids[9]=$!
|
||||
|
||||
sleep 30
|
||||
"${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
|
||||
minio_pids[0]=$!
|
||||
|
||||
"${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
|
||||
minio_pids[1]=$!
|
||||
|
||||
sleep 40
|
||||
echo "${minio_pids[@]}"
|
||||
}
|
||||
|
||||
@@ -143,7 +115,7 @@ function start_minio_dist_erasure()
|
||||
"${MINIO[@]}" server --address=:9003 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9003.log" 2>&1 &
|
||||
minio_pids[3]=$!
|
||||
|
||||
sleep 30
|
||||
sleep 40
|
||||
echo "${minio_pids[@]}"
|
||||
}
|
||||
|
||||
@@ -182,9 +154,35 @@ function run_test_erasure_sets() {
|
||||
return "$rv"
|
||||
}
|
||||
|
||||
function run_test_dist_erasure_sets_ipv6()
|
||||
function run_test_zone_erasure_sets()
|
||||
{
|
||||
minio_pids=( $(start_minio_dist_erasure_sets_ipv6) )
|
||||
minio_pids=( $(start_minio_zone_erasure_sets) )
|
||||
|
||||
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
|
||||
rv=$?
|
||||
|
||||
for pid in "${minio_pids[@]}"; do
|
||||
kill "$pid"
|
||||
done
|
||||
sleep 3
|
||||
|
||||
if [ "$rv" -ne 0 ]; then
|
||||
for i in $(seq 0 1); do
|
||||
echo "server$i log:"
|
||||
cat "$WORK_DIR/zone-minio-900$i.log"
|
||||
done
|
||||
fi
|
||||
|
||||
for i in $(seq 0 1); do
|
||||
rm -f "$WORK_DIR/zone-minio-900$i.log"
|
||||
done
|
||||
|
||||
return "$rv"
|
||||
}
|
||||
|
||||
function run_test_zone_erasure_sets_ipv6()
|
||||
{
|
||||
minio_pids=( $(start_minio_zone_erasure_sets_ipv6) )
|
||||
|
||||
export SERVER_ENDPOINT="[::1]:9000"
|
||||
|
||||
@@ -197,40 +195,14 @@ function run_test_dist_erasure_sets_ipv6()
|
||||
sleep 3
|
||||
|
||||
if [ "$rv" -ne 0 ]; then
|
||||
for i in $(seq 0 9); do
|
||||
for i in $(seq 0 1); do
|
||||
echo "server$i log:"
|
||||
cat "$WORK_DIR/dist-minio-v6-900$i.log"
|
||||
cat "$WORK_DIR/zone-minio-ipv6-900$i.log"
|
||||
done
|
||||
fi
|
||||
|
||||
for i in $(seq 0 9); do
|
||||
rm -f "$WORK_DIR/dist-minio-v6-900$i.log"
|
||||
done
|
||||
|
||||
return "$rv"
|
||||
}
|
||||
|
||||
function run_test_dist_erasure_sets()
|
||||
{
|
||||
minio_pids=( $(start_minio_dist_erasure_sets) )
|
||||
|
||||
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
|
||||
rv=$?
|
||||
|
||||
for pid in "${minio_pids[@]}"; do
|
||||
kill "$pid"
|
||||
done
|
||||
sleep 3
|
||||
|
||||
if [ "$rv" -ne 0 ]; then
|
||||
for i in $(seq 0 9); do
|
||||
echo "server$i log:"
|
||||
cat "$WORK_DIR/dist-minio-900$i.log"
|
||||
done
|
||||
fi
|
||||
|
||||
for i in $(seq 0 9); do
|
||||
rm -f "$WORK_DIR/dist-minio-900$i.log"
|
||||
for i in $(seq 0 1); do
|
||||
rm -f "$WORK_DIR/zone-minio-ipv6-900$i.log"
|
||||
done
|
||||
|
||||
return "$rv"
|
||||
@@ -351,15 +323,15 @@ function main()
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Testing in Distributed Erasure setup as sets"
|
||||
if ! run_test_dist_erasure_sets; then
|
||||
echo "Testing in Distributed Eraure expanded setup"
|
||||
if ! run_test_zone_erasure_sets; then
|
||||
echo "FAILED"
|
||||
purge "$WORK_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Testing in Distributed Erasure setup as sets with ipv6"
|
||||
if ! run_test_dist_erasure_sets_ipv6; then
|
||||
echo "Testing in Distributed Erasure expanded setup with ipv6"
|
||||
if ! run_test_zone_erasure_sets_ipv6; then
|
||||
echo "FAILED"
|
||||
purge "$WORK_DIR"
|
||||
exit 1
|
||||
|
||||
126
buildscripts/verify-healing.sh
Executable file
126
buildscripts/verify-healing.sh
Executable file
@@ -0,0 +1,126 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
set -e
|
||||
set -E
|
||||
set -o pipefail
|
||||
|
||||
if [ ! -x "$PWD/minio" ]; then
|
||||
echo "minio executable binary not found in current directory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
WORK_DIR="$PWD/.verify-$RANDOM"
|
||||
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
|
||||
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )
|
||||
|
||||
function start_minio_3_node() {
|
||||
declare -a minio_pids
|
||||
declare -a ARGS
|
||||
export MINIO_ACCESS_KEY=minio
|
||||
export MINIO_SECRET_KEY=minio123
|
||||
|
||||
start_port=$(shuf -i 10000-65000 -n 1)
|
||||
for i in $(seq 1 3); do
|
||||
ARGS+=("http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/")
|
||||
done
|
||||
|
||||
"${MINIO[@]}" --address ":$[$start_port+1]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
|
||||
minio_pids[0]=$!
|
||||
disown "${minio_pids[0]}"
|
||||
|
||||
"${MINIO[@]}" --address ":$[$start_port+2]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
|
||||
minio_pids[1]=$!
|
||||
disown "${minio_pids[1]}"
|
||||
|
||||
"${MINIO[@]}" --address ":$[$start_port+3]" ${ARGS[@]} > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
|
||||
minio_pids[2]=$!
|
||||
disown "${minio_pids[2]}"
|
||||
|
||||
sleep "$1"
|
||||
for pid in "${minio_pids[@]}"; do
|
||||
if ! kill "$pid"; then
|
||||
for i in $(seq 1 3); do
|
||||
echo "server$i log:"
|
||||
cat "${WORK_DIR}/dist-minio-server$i.log"
|
||||
done
|
||||
echo "FAILED"
|
||||
purge "$WORK_DIR"
|
||||
exit 1
|
||||
fi
|
||||
# forcibly killing, to proceed further properly.
|
||||
kill -9 "$pid"
|
||||
sleep 1 # wait 1sec per pid
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
function check_online() {
|
||||
if grep -q 'Server switching to safe mode' ${WORK_DIR}/dist-minio-*.log; then
|
||||
echo "1"
|
||||
fi
|
||||
}
|
||||
|
||||
function purge()
|
||||
{
|
||||
rm -rf "$1"
|
||||
}
|
||||
|
||||
function __init__()
|
||||
{
|
||||
echo "Initializing environment"
|
||||
mkdir -p "$WORK_DIR"
|
||||
mkdir -p "$MINIO_CONFIG_DIR"
|
||||
|
||||
## version is purposefully set to '3' for minio to migrate configuration file
|
||||
echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' > "$MINIO_CONFIG_DIR/config.json"
|
||||
}
|
||||
|
||||
function perform_test() {
|
||||
start_minio_3_node 60
|
||||
|
||||
echo "Testing Distributed Erasure setup healing of drives"
|
||||
echo "Remove the contents of the disks belonging to '${1}' erasure set"
|
||||
|
||||
rm -rf ${WORK_DIR}/${1}/*/
|
||||
|
||||
start_minio_3_node 60
|
||||
|
||||
rv=$(check_online)
|
||||
if [ "$rv" == "1" ]; then
|
||||
pkill -9 minio
|
||||
for i in $(seq 1 3); do
|
||||
echo "server$i log:"
|
||||
cat "${WORK_DIR}/dist-minio-server$i.log"
|
||||
done
|
||||
echo "FAILED"
|
||||
purge "$WORK_DIR"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function main()
|
||||
{
|
||||
perform_test "2"
|
||||
perform_test "1"
|
||||
perform_test "3"
|
||||
}
|
||||
|
||||
( __init__ "$@" && main "$@" )
|
||||
rv=$?
|
||||
purge "$WORK_DIR"
|
||||
exit "$rv"
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
* MinIO Cloud Storage, (C) 2018-2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -18,11 +18,14 @@ package cmd
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/policy"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
)
|
||||
|
||||
// Data types used for returning dummy access control
|
||||
@@ -50,6 +53,71 @@ type accessControlPolicy struct {
|
||||
} `xml:"AccessControlList"`
|
||||
}
|
||||
|
||||
// PutBucketACLHandler - PUT Bucket ACL
|
||||
// -----------------
|
||||
// This operation uses the ACL subresource
|
||||
// to set ACL for a bucket, this is a dummy call
|
||||
// only responds success if the ACL is private.
|
||||
func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "PutBucketACL")
|
||||
|
||||
defer logger.AuditLog(w, r, "PutBucketACL", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Allow putBucketACL if policy action is set, since this is a dummy call
|
||||
// we are simply re-purposing the bucketPolicyAction.
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Before proceeding validate if bucket exists.
|
||||
_, err := objAPI.GetBucketInfo(ctx, bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
aclHeader := r.Header.Get(xhttp.AmzACL)
|
||||
if aclHeader == "" {
|
||||
acl := &accessControlPolicy{}
|
||||
if err = xmlDecoder(r.Body, acl, r.ContentLength); err != nil {
|
||||
if err == io.EOF {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingSecurityHeader),
|
||||
r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if len(acl.AccessControlList.Grants) == 0 {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if aclHeader != "" && aclHeader != "private" {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// GetBucketACLHandler - GET Bucket ACL
|
||||
// -----------------
|
||||
// This operation uses the ACL
|
||||
@@ -91,6 +159,7 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
|
||||
},
|
||||
Permission: "FULL_CONTROL",
|
||||
})
|
||||
|
||||
if err := xml.NewEncoder(w).Encode(acl); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
@@ -99,6 +168,71 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// PutObjectACLHandler - PUT Object ACL
|
||||
// -----------------
|
||||
// This operation uses the ACL subresource
|
||||
// to set ACL for a bucket, this is a dummy call
|
||||
// only responds success if the ACL is private.
|
||||
func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "PutObjectACL")
|
||||
|
||||
defer logger.AuditLog(w, r, "PutObjectACL", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
object, err := url.PathUnescape(vars["object"])
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Allow putObjectACL if policy action is set, since this is a dummy call
|
||||
// we are simply re-purposing the bucketPolicyAction.
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Before proceeding validate if object exists.
|
||||
_, err = objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
aclHeader := r.Header.Get(xhttp.AmzACL)
|
||||
if aclHeader == "" {
|
||||
acl := &accessControlPolicy{}
|
||||
if err = xmlDecoder(r.Body, acl, r.ContentLength); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if len(acl.AccessControlList.Grants) == 0 {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if aclHeader != "" && aclHeader != "private" {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// GetObjectACLHandler - GET Object ACL
|
||||
// -----------------
|
||||
// This operation uses the ACL
|
||||
@@ -110,7 +244,11 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
object := vars["object"]
|
||||
object, err := url.PathUnescape(vars["object"])
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
@@ -126,7 +264,7 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.
|
||||
}
|
||||
|
||||
// Before proceeding validate if object exists.
|
||||
_, err := objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
|
||||
_, err = objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
|
||||
483
cmd/admin-handlers-config-kv.go
Normal file
483
cmd/admin-handlers-config-kv.go
Normal file
@@ -0,0 +1,483 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/cmd/config/cache"
|
||||
"github.com/minio/minio/cmd/config/etcd"
|
||||
xldap "github.com/minio/minio/cmd/config/identity/ldap"
|
||||
"github.com/minio/minio/cmd/config/identity/openid"
|
||||
"github.com/minio/minio/cmd/config/policy/opa"
|
||||
"github.com/minio/minio/cmd/config/storageclass"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
)
|
||||
|
||||
func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) (auth.Credentials, ObjectLayer) {
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return auth.Credentials{}, nil
|
||||
}
|
||||
|
||||
// Validate request signature.
|
||||
cred, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
|
||||
if adminAPIErr != ErrNone {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
|
||||
return cred, nil
|
||||
}
|
||||
|
||||
return cred, objectAPI
|
||||
}
|
||||
|
||||
// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
|
||||
func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "DeleteConfigKV")
|
||||
|
||||
defer logger.AuditLog(w, r, "DeleteConfigKV", mustGetClaimsFromToken(r))
|
||||
|
||||
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
|
||||
// More than maxConfigSize bytes were available
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
password := cred.SecretKey
|
||||
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err, logger.Application)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
cfg, err := readServerConfig(ctx, objectAPI)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = cfg.DelFrom(bytes.NewReader(kvBytes)); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// SetConfigKVHandler - PUT /minio/admin/v3/set-config-kv
|
||||
func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "SetConfigKV")
|
||||
|
||||
defer logger.AuditLog(w, r, "SetConfigKV", mustGetClaimsFromToken(r))
|
||||
|
||||
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
|
||||
// More than maxConfigSize bytes were available
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
password := cred.SecretKey
|
||||
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err, logger.Application)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
cfg, err := readServerConfig(ctx, objectAPI)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = validateConfig(cfg); err != nil {
|
||||
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Update the actual server config on disk.
|
||||
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Write to the config input KV to history.
|
||||
if err = saveServerConfigHistory(ctx, objectAPI, kvBytes); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Make sure to write backend is encrypted
|
||||
if globalConfigEncrypted {
|
||||
saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
|
||||
}
|
||||
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
}
|
||||
|
||||
// GetConfigKVHandler - GET /minio/admin/v3/get-config-kv?key={key}
|
||||
func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetConfigKV")
|
||||
|
||||
defer logger.AuditLog(w, r, "GetConfigKV", mustGetClaimsFromToken(r))
|
||||
|
||||
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
cfg := globalServerConfig
|
||||
if globalSafeMode {
|
||||
var err error
|
||||
cfg, err = getValidConfig(objectAPI)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
var buf = &bytes.Buffer{}
|
||||
cw := config.NewConfigWriteTo(cfg, vars["key"])
|
||||
if _, err := cw.WriteTo(buf); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
password := cred.SecretKey
|
||||
econfigData, err := madmin.EncryptData(password, buf.Bytes())
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, econfigData)
|
||||
}
|
||||
|
||||
func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ClearConfigHistoryKV")
|
||||
|
||||
defer logger.AuditLog(w, r, "ClearConfigHistoryKV", mustGetClaimsFromToken(r))
|
||||
|
||||
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
restoreID := vars["restoreId"]
|
||||
if restoreID == "" {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
if restoreID == "all" {
|
||||
chEntries, err := listServerConfigHistory(ctx, objectAPI, false, -1)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
for _, chEntry := range chEntries {
|
||||
if err = delServerConfigHistory(ctx, objectAPI, chEntry.RestoreID); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RestoreConfigHistoryKVHandler - restores a config with KV settings for the given KV id.
|
||||
func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "RestoreConfigHistoryKV")
|
||||
|
||||
defer logger.AuditLog(w, r, "RestoreConfigHistoryKV", mustGetClaimsFromToken(r))
|
||||
|
||||
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
restoreID := vars["restoreId"]
|
||||
if restoreID == "" {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
kvBytes, err := readServerConfigHistory(ctx, objectAPI, restoreID)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
cfg, err := readServerConfig(ctx, objectAPI)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = validateConfig(cfg); err != nil {
|
||||
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
delServerConfigHistory(ctx, objectAPI, restoreID)
|
||||
}
|
||||
|
||||
// ListConfigHistoryKVHandler - lists all the KV ids.
|
||||
func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListConfigHistoryKV")
|
||||
|
||||
defer logger.AuditLog(w, r, "ListConfigHistoryKV", mustGetClaimsFromToken(r))
|
||||
|
||||
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
count, err := strconv.Atoi(vars["count"])
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
chEntries, err := listServerConfigHistory(ctx, objectAPI, true, count)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
data, err := json.Marshal(chEntries)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
password := cred.SecretKey
|
||||
econfigData, err := madmin.EncryptData(password, data)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, econfigData)
|
||||
}
|
||||
|
||||
// HelpConfigKVHandler - GET /minio/admin/v3/help-config-kv?subSys={subSys}&key={key}
|
||||
func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "HelpConfigKV")
|
||||
|
||||
defer logger.AuditLog(w, r, "HelpHistoryKV", mustGetClaimsFromToken(r))
|
||||
|
||||
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
|
||||
subSys := vars["subSys"]
|
||||
key := vars["key"]
|
||||
|
||||
_, envOnly := r.URL.Query()["env"]
|
||||
|
||||
rd, err := GetHelp(subSys, key, envOnly)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
json.NewEncoder(w).Encode(rd)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// SetConfigHandler - PUT /minio/admin/v3/config
|
||||
func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "SetConfig")
|
||||
|
||||
defer logger.AuditLog(w, r, "SetConfig", mustGetClaimsFromToken(r))
|
||||
|
||||
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
|
||||
// More than maxConfigSize bytes were available
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
password := cred.SecretKey
|
||||
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err, logger.Application)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
cfg := newServerConfig()
|
||||
if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = validateConfig(cfg); err != nil {
|
||||
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Update the actual server config on disk.
|
||||
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Write to the config input KV to history.
|
||||
if err = saveServerConfigHistory(ctx, objectAPI, kvBytes); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Make sure to write backend is encrypted
|
||||
if globalConfigEncrypted {
|
||||
saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
|
||||
}
|
||||
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
}
|
||||
|
||||
// GetConfigHandler - GET /minio/admin/v3/config
// Get config.json of this minio setup.
func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetConfig")

	defer logger.AuditLog(w, r, "GetConfig", mustGetClaimsFromToken(r))

	cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
	if objectAPI == nil {
		return
	}

	cfg, err := readServerConfig(ctx, objectAPI)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	var s strings.Builder
	// Help entries under the top-level ("") key enumerate every known
	// config subsystem.
	hkvs := config.HelpSubSysMap[""]
	// Total number of (subsystem, target) entries across the whole
	// config; decremented below so no newline follows the final entry.
	var count int
	for _, hkv := range hkvs {
		count += len(cfg[hkv.Key])
	}
	for _, hkv := range hkvs {
		v := cfg[hkv.Key]
		for target, kv := range v {
			// Generic case: a subsystem with "enable=off" is emitted
			// commented-out.
			off := kv.Get(config.Enable) == config.EnableOff
			// Some subsystems decide enablement through dedicated
			// helpers rather than the generic "enable" key.
			switch hkv.Key {
			case config.EtcdSubSys:
				off = !etcd.Enabled(kv)
			case config.CacheSubSys:
				off = !cache.Enabled(kv)
			case config.StorageClassSubSys:
				off = !storageclass.Enabled(kv)
			case config.KmsVaultSubSys:
				off = !crypto.EnabledVault(kv)
			case config.KmsKesSubSys:
				off = !crypto.EnabledKes(kv)
			case config.PolicyOPASubSys:
				off = !opa.Enabled(kv)
			case config.IdentityOpenIDSubSys:
				off = !openid.Enabled(kv)
			case config.IdentityLDAPSubSys:
				off = !xldap.Enabled(kv)
			}
			// Disabled entries are prefixed with the comment marker so
			// the output remains a valid, re-importable config.
			if off {
				s.WriteString(config.KvComment)
				s.WriteString(config.KvSpaceSeparator)
			}
			s.WriteString(hkv.Key)
			// Non-default targets are rendered as "subsys:target".
			if target != config.Default {
				s.WriteString(config.SubSystemSeparator)
				s.WriteString(target)
			}
			s.WriteString(config.KvSpaceSeparator)
			s.WriteString(kv.String())
			count--
			if count > 0 {
				s.WriteString(config.KvNewline)
			}
		}
	}

	password := cred.SecretKey
	// The rendered config may contain secrets, so it is always returned
	// encrypted with the requester's secret key.
	econfigData, err := madmin.EncryptData(password, []byte(s.String()))
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, econfigData)
}
|
||||
917
cmd/admin-handlers-users.go
Normal file
917
cmd/admin-handlers-users.go
Normal file
@@ -0,0 +1,917 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
)
|
||||
|
||||
func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.Request, action iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
|
||||
var cred auth.Credentials
|
||||
var adminAPIErr APIErrorCode
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return nil, cred
|
||||
}
|
||||
|
||||
// Validate request signature.
|
||||
cred, adminAPIErr = checkAdminRequestAuthType(ctx, r, action, "")
|
||||
if adminAPIErr != ErrNone {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
|
||||
return nil, cred
|
||||
}
|
||||
|
||||
return objectAPI, cred
|
||||
}
|
||||
|
||||
// RemoveUser - DELETE /minio/admin/v3/remove-user?accessKey=<access_key>
|
||||
func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "RemoveUser")
|
||||
|
||||
defer logger.AuditLog(w, r, "RemoveUser", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeleteUserAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
accessKey := vars["accessKey"]
|
||||
|
||||
ok, err := globalIAMSys.IsTempUser(accessKey)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if ok {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errIAMActionNotAllowed), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err := globalIAMSys.DeleteUser(accessKey); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Notify all other MinIO peers to delete user.
|
||||
for _, nerr := range globalNotificationSys.DeleteUser(accessKey) {
|
||||
if nerr.Err != nil {
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ListUsers - GET /minio/admin/v3/list-users
|
||||
func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListUsers")
|
||||
|
||||
defer logger.AuditLog(w, r, "ListUsers", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
password := cred.SecretKey
|
||||
|
||||
allCredentials, err := globalIAMSys.ListUsers()
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
data, err := json.Marshal(allCredentials)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
econfigData, err := madmin.EncryptData(password, data)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, econfigData)
|
||||
}
|
||||
|
||||
// GetUserInfo - GET /minio/admin/v3/user-info
|
||||
func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetUserInfo")
|
||||
|
||||
defer logger.AuditLog(w, r, "GetUserInfo", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetUserAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
name := vars["accessKey"]
|
||||
|
||||
userInfo, err := globalIAMSys.GetUserInfo(name)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
data, err := json.Marshal(userInfo)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, data)
|
||||
}
|
||||
|
||||
// UpdateGroupMembers - PUT /minio/admin/v3/update-group-members
|
||||
func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "UpdateGroupMembers")
|
||||
|
||||
defer logger.AuditLog(w, r, "UpdateGroupMembers", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AddUserToGroupAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
defer r.Body.Close()
|
||||
data, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
var updReq madmin.GroupAddRemove
|
||||
err = json.Unmarshal(data, &updReq)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if updReq.IsRemove {
|
||||
err = globalIAMSys.RemoveUsersFromGroup(updReq.Group, updReq.Members)
|
||||
} else {
|
||||
err = globalIAMSys.AddUsersToGroup(updReq.Group, updReq.Members)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Notify all other MinIO peers to load group.
|
||||
for _, nerr := range globalNotificationSys.LoadGroup(updReq.Group) {
|
||||
if nerr.Err != nil {
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetGroup - /minio/admin/v3/group?group=mygroup1
|
||||
func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetGroup")
|
||||
|
||||
defer logger.AuditLog(w, r, "GetGroup", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetGroupAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
group := vars["group"]
|
||||
|
||||
gdesc, err := globalIAMSys.GetGroupDescription(group)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
body, err := json.Marshal(gdesc)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, body)
|
||||
}
|
||||
|
||||
// ListGroups - GET /minio/admin/v3/groups
|
||||
func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListGroups")
|
||||
|
||||
defer logger.AuditLog(w, r, "ListGroups", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListGroupsAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
groups, err := globalIAMSys.ListGroups()
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
body, err := json.Marshal(groups)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, body)
|
||||
}
|
||||
|
||||
// SetGroupStatus - PUT /minio/admin/v3/set-group-status?group=mygroup1&status=enabled
|
||||
func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "SetGroupStatus")
|
||||
|
||||
defer logger.AuditLog(w, r, "SetGroupStatus", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableGroupAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
group := vars["group"]
|
||||
status := vars["status"]
|
||||
|
||||
var err error
|
||||
if status == statusEnabled {
|
||||
err = globalIAMSys.SetGroupStatus(group, true)
|
||||
} else if status == statusDisabled {
|
||||
err = globalIAMSys.SetGroupStatus(group, false)
|
||||
} else {
|
||||
err = errInvalidArgument
|
||||
}
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Notify all other MinIO peers to reload user.
|
||||
for _, nerr := range globalNotificationSys.LoadGroup(group) {
|
||||
if nerr.Err != nil {
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetUserStatus - PUT /minio/admin/v3/set-user-status?accessKey=<access_key>&status=[enabled|disabled]
|
||||
func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "SetUserStatus")
|
||||
|
||||
defer logger.AuditLog(w, r, "SetUserStatus", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableUserAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
accessKey := vars["accessKey"]
|
||||
status := vars["status"]
|
||||
|
||||
// Custom IAM policies not allowed for admin user.
|
||||
if accessKey == globalActiveCred.AccessKey {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err := globalIAMSys.SetUserStatus(accessKey, madmin.AccountStatus(status)); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Notify all other MinIO peers to reload user.
|
||||
for _, nerr := range globalNotificationSys.LoadUser(accessKey, false) {
|
||||
if nerr.Err != nil {
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// AddUser - PUT /minio/admin/v3/add-user?accessKey=<access_key>
// Creates or replaces an IAM user from a request body encrypted with the
// requester's secret key.
func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "AddUser")

	defer logger.AuditLog(w, r, "AddUser", mustGetClaimsFromToken(r))

	objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.CreateUserAdminAction)
	if objectAPI == nil {
		return
	}

	vars := mux.Vars(r)
	accessKey := vars["accessKey"]

	// Custom IAM policies not allowed for admin user.
	if accessKey == globalActiveCred.AccessKey {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
		return
	}

	// Reject oversized bodies and requests with unknown length (-1).
	if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
		// More than maxConfigSize bytes were available
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
		return
	}

	// The body is encrypted with the requester's secret key.
	password := cred.SecretKey
	configBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
		return
	}

	var uinfo madmin.UserInfo
	if err = json.Unmarshal(configBytes, &uinfo); err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
		return
	}

	if err = globalIAMSys.SetUser(accessKey, uinfo); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Notify all other MinIO peers to reload user.
	for _, nerr := range globalNotificationSys.LoadUser(accessKey, false) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	}
}
|
||||
|
||||
// AddServiceAccount - PUT /minio/admin/v3/add-service-account
// Creates a service account tied to the requesting (non-root) user and
// returns the new credentials encrypted with the requester's secret key.
func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "AddServiceAccount")

	defer logger.AuditLog(w, r, "AddServiceAccount", mustGetClaimsFromToken(r))

	// Get current object layer instance.
	objectAPI := newObjectLayerWithoutSafeModeFn()
	if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
	if s3Err != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
		return
	}

	// The request body is encrypted with the requester's secret key.
	password := cred.SecretKey
	reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
		return
	}

	var createReq madmin.AddServiceAccountReq
	if err = json.Unmarshal(reqBytes, &createReq); err != nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
		return
	}

	// Disallow creating service accounts by root user.
	if owner {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
		return
	}

	// When the requester is itself a derived credential, attach the new
	// service account to its parent user instead.
	parentUser := cred.AccessKey
	if cred.ParentUser != "" {
		parentUser = cred.ParentUser
	}

	newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, createReq.Policy)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Notify all other MinIO peers to reload the new service account.
	for _, nerr := range globalNotificationSys.LoadServiceAccount(newCred.AccessKey) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	}

	var createResp = madmin.AddServiceAccountResp{
		Credentials: auth.Credentials{
			AccessKey: newCred.AccessKey,
			SecretKey: newCred.SecretKey,
		},
	}

	data, err := json.Marshal(createResp)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// The response contains the new secret key; return it encrypted.
	encryptedData, err := madmin.EncryptData(password, data)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, encryptedData)
}
|
||||
|
||||
// ListServiceAccounts - GET /minio/admin/v3/list-service-accounts
// Lists the service accounts belonging to the requesting (non-root) user,
// encrypted with the requester's secret key.
func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListServiceAccounts")

	defer logger.AuditLog(w, r, "ListServiceAccounts", mustGetClaimsFromToken(r))

	// Get current object layer instance.
	objectAPI := newObjectLayerWithoutSafeModeFn()
	if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
	if s3Err != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
		return
	}

	// Disallow listing service accounts by the root user.
	if owner {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
		return
	}

	// When the requester is itself a derived credential, list the parent
	// user's service accounts instead.
	parentUser := cred.AccessKey
	if cred.ParentUser != "" {
		parentUser = cred.ParentUser
	}

	serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, parentUser)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	var listResp = madmin.ListServiceAccountsResp{
		Accounts: serviceAccounts,
	}

	data, err := json.Marshal(listResp)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, encryptedData)
}
|
||||
|
||||
// DeleteServiceAccount - DELETE /minio/admin/v3/delete-service-account
// Deletes a service account; only the account's parent user may delete it.
func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DeleteServiceAccount")

	defer logger.AuditLog(w, r, "DeleteServiceAccount", mustGetClaimsFromToken(r))

	// Get current object layer instance.
	objectAPI := newObjectLayerWithoutSafeModeFn()
	if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
	if s3Err != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
		return
	}

	// Disallow deleting service accounts by the root user.
	if owner {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
		return
	}

	serviceAccount := mux.Vars(r)["accessKey"]
	if serviceAccount == "" {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
		return
	}

	user, err := globalIAMSys.GetServiceAccountParent(ctx, serviceAccount)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Resolve the requester to its parent user when the credential is
	// itself derived.
	parentUser := cred.AccessKey
	if cred.ParentUser != "" {
		parentUser = cred.ParentUser
	}

	if parentUser != user || user == "" {
		// The service account belongs to another user but return not
		// found error to mitigate brute force attacks. or the
		// serviceAccount doesn't exist.
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServiceAccountNotFound), r.URL)
		return
	}

	err = globalIAMSys.DeleteServiceAccount(ctx, serviceAccount)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessNoContent(w)
}
|
||||
|
||||
// AccountUsageInfoHandler returns usage
|
||||
func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "AccountUsageInfo")
|
||||
|
||||
defer logger.AuditLog(w, r, "AccountUsageInfo", mustGetClaimsFromToken(r))
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
|
||||
if s3Err != ErrNone {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Set prefix value for "s3:prefix" policy conditionals.
|
||||
r.Header.Set("prefix", "")
|
||||
|
||||
// Set delimiter value for "s3:delimiter" policy conditionals.
|
||||
r.Header.Set("delimiter", SlashSeparator)
|
||||
|
||||
isAllowedAccess := func(bucketName string) (rd, wr bool) {
|
||||
// Use the following trick to filter in place
|
||||
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: iampolicy.ListBucketAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
|
||||
IsOwner: owner,
|
||||
ObjectName: "",
|
||||
Claims: claims,
|
||||
}) {
|
||||
rd = true
|
||||
}
|
||||
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: iampolicy.PutObjectAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
|
||||
IsOwner: owner,
|
||||
ObjectName: "",
|
||||
Claims: claims,
|
||||
}) {
|
||||
wr = true
|
||||
}
|
||||
|
||||
return rd, wr
|
||||
}
|
||||
|
||||
buckets, err := objectAPI.ListBuckets(ctx)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Load the latest calculated data usage
|
||||
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
|
||||
if err != nil {
|
||||
// log the error, continue with the accounting response
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
|
||||
accountName := cred.AccessKey
|
||||
if cred.ParentUser != "" {
|
||||
accountName = cred.ParentUser
|
||||
}
|
||||
|
||||
acctInfo := madmin.AccountUsageInfo{
|
||||
AccountName: accountName,
|
||||
}
|
||||
|
||||
for _, bucket := range buckets {
|
||||
rd, wr := isAllowedAccess(bucket.Name)
|
||||
if rd || wr {
|
||||
var size uint64
|
||||
// Fetch the data usage of the current bucket
|
||||
if !dataUsageInfo.LastUpdate.IsZero() {
|
||||
size = dataUsageInfo.BucketsUsage[bucket.Name].Size
|
||||
}
|
||||
acctInfo.Buckets = append(acctInfo.Buckets, madmin.BucketUsageInfo{
|
||||
Name: bucket.Name,
|
||||
Created: bucket.Created,
|
||||
Size: size,
|
||||
Access: madmin.AccountAccess{
|
||||
Read: rd,
|
||||
Write: wr,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
usageInfoJSON, err := json.Marshal(acctInfo)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, usageInfoJSON)
|
||||
}
|
||||
|
||||
// InfoCannedPolicyV2 - GET /minio/admin/v2/info-canned-policy?name={policyName}
|
||||
func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "InfoCannedPolicyV2")
|
||||
|
||||
defer logger.AuditLog(w, r, "InfoCannedPolicyV2", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
data, err := json.Marshal(policy)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
w.Write(data)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// InfoCannedPolicy - GET /minio/admin/v3/info-canned-policy?name={policyName}
|
||||
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "InfoCannedPolicy")
|
||||
|
||||
defer logger.AuditLog(w, r, "InfoCannedPolicy", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
json.NewEncoder(w).Encode(policy)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// ListCannedPoliciesV2 - GET /minio/admin/v2/list-canned-policies
|
||||
func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListCannedPoliciesV2")
|
||||
|
||||
defer logger.AuditLog(w, r, "ListCannedPoliciesV2", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
policies, err := globalIAMSys.ListPolicies()
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
policyMap := make(map[string][]byte, len(policies))
|
||||
for k, p := range policies {
|
||||
var err error
|
||||
policyMap[k], err = json.Marshal(p)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if err = json.NewEncoder(w).Encode(policyMap); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// ListCannedPolicies - GET /minio/admin/v3/list-canned-policies
|
||||
func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListCannedPolicies")
|
||||
|
||||
defer logger.AuditLog(w, r, "ListCannedPolicies", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
policies, err := globalIAMSys.ListPolicies()
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
var newPolicies = make(map[string]iampolicy.Policy)
|
||||
for name, p := range policies {
|
||||
_, err = json.Marshal(p)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
newPolicies[name] = p
|
||||
}
|
||||
if err = json.NewEncoder(w).Encode(newPolicies); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// RemoveCannedPolicy - DELETE /minio/admin/v3/remove-canned-policy?name=<policy_name>
|
||||
func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "RemoveCannedPolicy")
|
||||
|
||||
defer logger.AuditLog(w, r, "RemoveCannedPolicy", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeletePolicyAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
policyName := vars["name"]
|
||||
|
||||
if err := globalIAMSys.DeletePolicy(policyName); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Notify all other MinIO peers to delete policy
|
||||
for _, nerr := range globalNotificationSys.DeletePolicy(policyName) {
|
||||
if nerr.Err != nil {
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// AddCannedPolicy - PUT /minio/admin/v3/add-canned-policy?name=<policy_name>
|
||||
func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "AddCannedPolicy")
|
||||
|
||||
defer logger.AuditLog(w, r, "AddCannedPolicy", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.CreatePolicyAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
policyName := vars["name"]
|
||||
|
||||
// Error out if Content-Length is missing.
|
||||
if r.ContentLength <= 0 {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Error out if Content-Length is beyond allowed size.
|
||||
if r.ContentLength > maxBucketPolicySize {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
iamPolicy, err := iampolicy.ParseConfig(io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Version in policy must not be empty
|
||||
if iamPolicy.Version == "" {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = globalIAMSys.SetPolicy(policyName, *iamPolicy); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Notify all other MinIO peers to reload policy
|
||||
for _, nerr := range globalNotificationSys.LoadPolicy(policyName) {
|
||||
if nerr.Err != nil {
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetPolicyForUserOrGroup - PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
|
||||
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "SetPolicyForUserOrGroup")
|
||||
|
||||
defer logger.AuditLog(w, r, "SetPolicyForUserOrGroup", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AttachPolicyAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
policyName := vars["policyName"]
|
||||
entityName := vars["userOrGroup"]
|
||||
isGroup := vars["isGroup"] == "true"
|
||||
|
||||
if !isGroup {
|
||||
ok, err := globalIAMSys.IsTempUser(entityName)
|
||||
if err != nil && err != errNoSuchUser {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if ok {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errIAMActionNotAllowed), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err := globalIAMSys.PolicyDBSet(entityName, policyName, isGroup); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Notify all other MinIO peers to reload policy
|
||||
for _, nerr := range globalNotificationSys.LoadPolicyMapping(entityName, isGroup) {
|
||||
if nerr.Err != nil {
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -25,227 +25,14 @@ import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
)
|
||||
|
||||
var (
|
||||
configJSON = []byte(`{
|
||||
"version": "33",
|
||||
"credential": {
|
||||
"accessKey": "minio",
|
||||
"secretKey": "minio123"
|
||||
},
|
||||
"region": "us-east-1",
|
||||
"worm": "off",
|
||||
"storageclass": {
|
||||
"standard": "",
|
||||
"rrs": ""
|
||||
},
|
||||
"cache": {
|
||||
"drives": [],
|
||||
"expiry": 90,
|
||||
"maxuse": 80,
|
||||
"exclude": []
|
||||
},
|
||||
"kms": {
|
||||
"vault": {
|
||||
"endpoint": "",
|
||||
"auth": {
|
||||
"type": "",
|
||||
"approle": {
|
||||
"id": "",
|
||||
"secret": ""
|
||||
}
|
||||
},
|
||||
"key-id": {
|
||||
"name": "",
|
||||
"version": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
"notify": {
|
||||
"amqp": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"url": "",
|
||||
"exchange": "",
|
||||
"routingKey": "",
|
||||
"exchangeType": "",
|
||||
"deliveryMode": 0,
|
||||
"mandatory": false,
|
||||
"immediate": false,
|
||||
"durable": false,
|
||||
"internal": false,
|
||||
"noWait": false,
|
||||
"autoDeleted": false,
|
||||
"queueDir": "",
|
||||
"queueLimit": 0
|
||||
}
|
||||
},
|
||||
"elasticsearch": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"format": "namespace",
|
||||
"url": "",
|
||||
"index": "",
|
||||
"queueDir": "",
|
||||
"queueLimit": 0
|
||||
}
|
||||
},
|
||||
"kafka": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"brokers": null,
|
||||
"topic": "",
|
||||
"queueDir": "",
|
||||
"queueLimit": 0,
|
||||
"tls": {
|
||||
"enable": false,
|
||||
"skipVerify": false,
|
||||
"clientAuth": 0
|
||||
},
|
||||
"sasl": {
|
||||
"enable": false,
|
||||
"username": "",
|
||||
"password": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"mqtt": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"broker": "",
|
||||
"topic": "",
|
||||
"qos": 0,
|
||||
"username": "",
|
||||
"password": "",
|
||||
"reconnectInterval": 0,
|
||||
"keepAliveInterval": 0,
|
||||
"queueDir": "",
|
||||
"queueLimit": 0
|
||||
}
|
||||
},
|
||||
"mysql": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"format": "namespace",
|
||||
"dsnString": "",
|
||||
"table": "",
|
||||
"host": "",
|
||||
"port": "",
|
||||
"user": "",
|
||||
"password": "",
|
||||
"database": "",
|
||||
"queueDir": "",
|
||||
"queueLimit": 0
|
||||
}
|
||||
},
|
||||
"nats": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"address": "",
|
||||
"subject": "",
|
||||
"username": "",
|
||||
"password": "",
|
||||
"token": "",
|
||||
"secure": false,
|
||||
"pingInterval": 0,
|
||||
"queueDir": "",
|
||||
"queueLimit": 0,
|
||||
"streaming": {
|
||||
"enable": false,
|
||||
"clusterID": "",
|
||||
"async": false,
|
||||
"maxPubAcksInflight": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
"nsq": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"nsqdAddress": "",
|
||||
"topic": "",
|
||||
"tls": {
|
||||
"enable": false,
|
||||
"skipVerify": false
|
||||
},
|
||||
"queueDir": "",
|
||||
"queueLimit": 0
|
||||
}
|
||||
},
|
||||
"postgresql": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"format": "namespace",
|
||||
"connectionString": "",
|
||||
"table": "",
|
||||
"host": "",
|
||||
"port": "",
|
||||
"user": "",
|
||||
"password": "",
|
||||
"database": "",
|
||||
"queueDir": "",
|
||||
"queueLimit": 0
|
||||
}
|
||||
},
|
||||
"redis": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"format": "namespace",
|
||||
"address": "",
|
||||
"password": "",
|
||||
"key": "",
|
||||
"queueDir": "",
|
||||
"queueLimit": 0
|
||||
}
|
||||
},
|
||||
"webhook": {
|
||||
"1": {
|
||||
"enable": false,
|
||||
"endpoint": "",
|
||||
"queueDir": "",
|
||||
"queueLimit": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
"logger": {
|
||||
"console": {
|
||||
"enabled": true
|
||||
},
|
||||
"http": {
|
||||
"1": {
|
||||
"enabled": false,
|
||||
"endpoint": "https://username:password@example.com/api"
|
||||
}
|
||||
}
|
||||
},
|
||||
"compress": {
|
||||
"enabled": false,
|
||||
"extensions":[".txt",".log",".csv",".json"],
|
||||
"mime-types":["text/csv","text/plain","application/json"]
|
||||
},
|
||||
"openid": {
|
||||
"jwks": {
|
||||
"url": ""
|
||||
}
|
||||
},
|
||||
"policy": {
|
||||
"opa": {
|
||||
"url": "",
|
||||
"authToken": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
`)
|
||||
)
|
||||
|
||||
// adminXLTestBed - encapsulates subsystems that need to be setup for
|
||||
// admin-handler unit tests.
|
||||
type adminXLTestBed struct {
|
||||
@@ -256,12 +43,17 @@ type adminXLTestBed struct {
|
||||
|
||||
// prepareAdminXLTestBed - helper function that setups a single-node
|
||||
// XL backend for admin-handler tests.
|
||||
func prepareAdminXLTestBed() (*adminXLTestBed, error) {
|
||||
func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) {
|
||||
|
||||
// reset global variables to start afresh.
|
||||
resetTestGlobals()
|
||||
|
||||
// Set globalIsXL to indicate that the setup uses an erasure
|
||||
// code backend.
|
||||
globalIsXL = true
|
||||
|
||||
// Initializing objectLayer for HealFormatHandler.
|
||||
objLayer, xlDirs, xlErr := initTestXLObjLayer()
|
||||
objLayer, xlDirs, xlErr := initTestXLObjLayer(ctx)
|
||||
if xlErr != nil {
|
||||
return nil, xlErr
|
||||
}
|
||||
@@ -274,31 +66,11 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
|
||||
// Initialize boot time
|
||||
globalBootTime = UTCNow()
|
||||
|
||||
globalEndpoints = mustGetNewEndpointList(xlDirs...)
|
||||
globalEndpoints = mustGetZoneEndpoints(xlDirs...)
|
||||
|
||||
// Set globalIsXL to indicate that the setup uses an erasure
|
||||
// code backend.
|
||||
globalIsXL = true
|
||||
newAllSubsystems()
|
||||
|
||||
// initialize NSLock.
|
||||
isDistXL := false
|
||||
initNSLock(isDistXL)
|
||||
|
||||
// Init global heal state
|
||||
if globalIsXL {
|
||||
globalAllHealState = initHealState()
|
||||
}
|
||||
|
||||
globalConfigSys = NewConfigSys()
|
||||
|
||||
globalIAMSys = NewIAMSys()
|
||||
globalIAMSys.Init(objLayer)
|
||||
|
||||
globalPolicySys = NewPolicySys()
|
||||
globalPolicySys.Init(objLayer)
|
||||
|
||||
globalNotificationSys = NewNotificationSys(globalServerConfig, globalEndpoints)
|
||||
globalNotificationSys.Init(objLayer)
|
||||
initAllSubsystems(objLayer)
|
||||
|
||||
// Setup admin mgmt REST API handlers.
|
||||
adminRouter := mux.NewRouter()
|
||||
@@ -320,20 +92,20 @@ func (atb *adminXLTestBed) TearDown() {
|
||||
|
||||
// initTestObjLayer - Helper function to initialize an XL-based object
|
||||
// layer and set globalObjectAPI.
|
||||
func initTestXLObjLayer() (ObjectLayer, []string, error) {
|
||||
func initTestXLObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
|
||||
xlDirs, err := getRandomDisks(16)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
endpoints := mustGetNewEndpointList(xlDirs...)
|
||||
format, err := waitForFormatXL(context.Background(), true, endpoints, 1, 16)
|
||||
endpoints := mustGetNewEndpoints(xlDirs...)
|
||||
storageDisks, format, err := waitForFormatXL(true, endpoints, 1, 1, 16, "")
|
||||
if err != nil {
|
||||
removeRoots(xlDirs)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
globalPolicySys = NewPolicySys()
|
||||
objLayer, err := newXLSets(endpoints, format, 1, 16)
|
||||
objLayer, err := newXLSets(ctx, endpoints, storageDisks, format)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -391,7 +163,7 @@ func testServiceSignalReceiver(cmd cmdType, t *testing.T) {
|
||||
func getServiceCmdRequest(cmd cmdType, cred auth.Credentials) (*http.Request, error) {
|
||||
queryVal := url.Values{}
|
||||
queryVal.Set("action", string(cmd.toServiceAction()))
|
||||
resource := "/minio/admin/v1/service?" + queryVal.Encode()
|
||||
resource := adminPathPrefix + adminAPIVersionPrefix + "/service?" + queryVal.Encode()
|
||||
req, err := newTestRequest(http.MethodPost, resource, 0, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -408,7 +180,10 @@ func getServiceCmdRequest(cmd cmdType, cred auth.Credentials) (*http.Request, er
|
||||
// testServicesCmdHandler - parametrizes service subcommand tests on
|
||||
// cmdType value.
|
||||
func testServicesCmdHandler(cmd cmdType, t *testing.T) {
|
||||
adminTestBed, err := prepareAdminXLTestBed()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
adminTestBed, err := prepareAdminXLTestBed(ctx)
|
||||
if err != nil {
|
||||
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
|
||||
}
|
||||
@@ -430,7 +205,7 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
|
||||
testServiceSignalReceiver(cmd, t)
|
||||
}()
|
||||
}
|
||||
credentials := globalServerConfig.GetCredential()
|
||||
credentials := globalActiveCred
|
||||
|
||||
req, err := getServiceCmdRequest(cmd, credentials)
|
||||
if err != nil {
|
||||
@@ -460,13 +235,13 @@ func buildAdminRequest(queryVal url.Values, method, path string,
|
||||
contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error) {
|
||||
|
||||
req, err := newTestRequest(method,
|
||||
"/minio/admin/v1"+path+"?"+queryVal.Encode(),
|
||||
adminPathPrefix+adminAPIVersionPrefix+path+"?"+queryVal.Encode(),
|
||||
contentLength, bodySeeker)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cred := globalServerConfig.GetCredential()
|
||||
cred := globalActiveCred
|
||||
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -475,111 +250,15 @@ func buildAdminRequest(queryVal url.Values, method, path string,
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// TestGetConfigHandler - test for GetConfigHandler.
|
||||
func TestGetConfigHandler(t *testing.T) {
|
||||
adminTestBed, err := prepareAdminXLTestBed()
|
||||
if err != nil {
|
||||
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
|
||||
}
|
||||
defer adminTestBed.TearDown()
|
||||
|
||||
// Initialize admin peers to make admin RPC calls.
|
||||
globalMinioAddr = "127.0.0.1:9000"
|
||||
|
||||
// Prepare query params for get-config mgmt REST API.
|
||||
queryVal := url.Values{}
|
||||
queryVal.Set("config", "")
|
||||
|
||||
req, err := buildAdminRequest(queryVal, http.MethodGet, "/config", 0, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to construct get-config object request - %v", err)
|
||||
}
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
adminTestBed.router.ServeHTTP(rec, req)
|
||||
if rec.Code != http.StatusOK {
|
||||
t.Errorf("Expected to succeed but failed with %d", rec.Code)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// TestSetConfigHandler - test for SetConfigHandler.
|
||||
func TestSetConfigHandler(t *testing.T) {
|
||||
adminTestBed, err := prepareAdminXLTestBed()
|
||||
if err != nil {
|
||||
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
|
||||
}
|
||||
defer adminTestBed.TearDown()
|
||||
|
||||
// Initialize admin peers to make admin RPC calls.
|
||||
globalMinioAddr = "127.0.0.1:9000"
|
||||
|
||||
// Prepare query params for set-config mgmt REST API.
|
||||
queryVal := url.Values{}
|
||||
queryVal.Set("config", "")
|
||||
|
||||
password := globalServerConfig.GetCredential().SecretKey
|
||||
econfigJSON, err := madmin.EncryptData(password, configJSON)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
req, err := buildAdminRequest(queryVal, http.MethodPut, "/config",
|
||||
int64(len(econfigJSON)), bytes.NewReader(econfigJSON))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to construct set-config object request - %v", err)
|
||||
}
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
adminTestBed.router.ServeHTTP(rec, req)
|
||||
if rec.Code != http.StatusOK {
|
||||
t.Errorf("Expected to succeed but failed with %d, body: %s", rec.Code, rec.Body)
|
||||
}
|
||||
|
||||
// Check that a very large config file returns an error.
|
||||
{
|
||||
// Make a large enough config string
|
||||
invalidCfg := []byte(strings.Repeat("A", maxEConfigJSONSize+1))
|
||||
req, err := buildAdminRequest(queryVal, http.MethodPut, "/config",
|
||||
int64(len(invalidCfg)), bytes.NewReader(invalidCfg))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to construct set-config object request - %v", err)
|
||||
}
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
adminTestBed.router.ServeHTTP(rec, req)
|
||||
respBody := rec.Body.String()
|
||||
if rec.Code != http.StatusBadRequest ||
|
||||
!strings.Contains(respBody, "Configuration data provided exceeds the allowed maximum of") {
|
||||
t.Errorf("Got unexpected response code or body %d - %s", rec.Code, respBody)
|
||||
}
|
||||
}
|
||||
|
||||
// Check that a config with duplicate keys in an object return
|
||||
// error.
|
||||
{
|
||||
invalidCfg := append(econfigJSON[:len(econfigJSON)-1], []byte(`, "version": "15"}`)...)
|
||||
req, err := buildAdminRequest(queryVal, http.MethodPut, "/config",
|
||||
int64(len(invalidCfg)), bytes.NewReader(invalidCfg))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to construct set-config object request - %v", err)
|
||||
}
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
adminTestBed.router.ServeHTTP(rec, req)
|
||||
respBody := rec.Body.String()
|
||||
if rec.Code != http.StatusBadRequest ||
|
||||
!strings.Contains(respBody, "JSON configuration provided is of incorrect format") {
|
||||
t.Errorf("Got unexpected response code or body %d - %s", rec.Code, respBody)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAdminServerInfo(t *testing.T) {
|
||||
adminTestBed, err := prepareAdminXLTestBed()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
adminTestBed, err := prepareAdminXLTestBed(ctx)
|
||||
if err != nil {
|
||||
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
|
||||
}
|
||||
|
||||
defer adminTestBed.TearDown()
|
||||
|
||||
// Initialize admin peers to make admin RPC calls.
|
||||
@@ -600,23 +279,14 @@ func TestAdminServerInfo(t *testing.T) {
|
||||
t.Errorf("Expected to succeed but failed with %d", rec.Code)
|
||||
}
|
||||
|
||||
results := []ServerInfo{}
|
||||
results := madmin.InfoMessage{}
|
||||
err = json.NewDecoder(rec.Body).Decode(&results)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to decode set config result json %v", err)
|
||||
}
|
||||
|
||||
if len(results) == 0 {
|
||||
t.Error("Expected at least one server info result")
|
||||
}
|
||||
|
||||
for _, serverInfo := range results {
|
||||
if serverInfo.Error != "" {
|
||||
t.Errorf("Unexpected error = %v\n", serverInfo.Error)
|
||||
}
|
||||
if serverInfo.Data.Properties.Region != globalMinioDefaultRegion {
|
||||
t.Errorf("Expected %s, got %s", globalMinioDefaultRegion, serverInfo.Data.Properties.Region)
|
||||
}
|
||||
if results.Region != globalMinioDefaultRegion {
|
||||
t.Errorf("Expected %s, got %s", globalMinioDefaultRegion, results.Region)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -639,12 +309,12 @@ func TestToAdminAPIErrCode(t *testing.T) {
|
||||
// 3. Non-admin API specific error.
|
||||
{
|
||||
err: errDiskNotFound,
|
||||
expectedAPIErr: toAPIErrorCode(context.Background(), errDiskNotFound),
|
||||
expectedAPIErr: toAPIErrorCode(GlobalContext, errDiskNotFound),
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range testCases {
|
||||
actualErr := toAdminAPIErrCode(context.Background(), test.err)
|
||||
actualErr := toAdminAPIErrCode(GlobalContext, test.err)
|
||||
if actualErr != test.expectedAPIErr {
|
||||
t.Errorf("Test %d: Expected %v but received %v",
|
||||
i+1, test.expectedAPIErr, actualErr)
|
||||
@@ -652,46 +322,6 @@ func TestToAdminAPIErrCode(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestTopLockEntries(t *testing.T) {
|
||||
t1 := UTCNow()
|
||||
t2 := UTCNow().Add(10 * time.Second)
|
||||
peerLocks := []*PeerLocks{
|
||||
{
|
||||
Addr: "1",
|
||||
Locks: map[string][]lockRequesterInfo{
|
||||
"1": {
|
||||
{false, "node2", "ep2", "2", t2, t2, ""},
|
||||
{true, "node1", "ep1", "1", t1, t1, ""},
|
||||
},
|
||||
"2": {
|
||||
{false, "node2", "ep2", "2", t2, t2, ""},
|
||||
{true, "node1", "ep1", "1", t1, t1, ""},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Addr: "2",
|
||||
Locks: map[string][]lockRequesterInfo{
|
||||
"1": {
|
||||
{false, "node2", "ep2", "2", t2, t2, ""},
|
||||
{true, "node1", "ep1", "1", t1, t1, ""},
|
||||
},
|
||||
"2": {
|
||||
{false, "node2", "ep2", "2", t2, t2, ""},
|
||||
{true, "node1", "ep1", "1", t1, t1, ""},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
les := topLockEntries(peerLocks)
|
||||
if len(les) != 2 {
|
||||
t.Fatalf("Did not get 2 results")
|
||||
}
|
||||
if les[0].Timestamp.After(les[1].Timestamp) {
|
||||
t.Fatalf("Got wrong sorted value")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractHealInitParams(t *testing.T) {
|
||||
mkParams := func(clientToken string, forceStart, forceStop bool) url.Values {
|
||||
v := url.Values{}
|
||||
|
||||
@@ -53,12 +53,15 @@ const (
|
||||
// time-duration to keep heal sequence state after it
|
||||
// completes.
|
||||
keepHealSeqStateDuration = time.Minute * 10
|
||||
|
||||
// nopHeal is a no operating healing action to
|
||||
// wait for the current healing operation to finish
|
||||
nopHeal = ""
|
||||
)
|
||||
|
||||
var (
|
||||
errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long")
|
||||
errHealPushStopNDiscard = fmt.Errorf("heal push stopped due to heal stop signal")
|
||||
errHealStopSignalled = fmt.Errorf("heal stop signaled")
|
||||
errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long")
|
||||
errHealStopSignalled = fmt.Errorf("heal stop signaled")
|
||||
|
||||
errFnHealFromAPIErr = func(ctx context.Context, err error) error {
|
||||
apiErr := toAPIError(ctx, err)
|
||||
@@ -69,10 +72,6 @@ var (
|
||||
|
||||
// healSequenceStatus - accumulated status of the heal sequence
|
||||
type healSequenceStatus struct {
|
||||
// lock to update this structure as it is concurrently
|
||||
// accessed
|
||||
updateLock *sync.RWMutex
|
||||
|
||||
// summary and detail for failures
|
||||
Summary healStatusSummary `json:"Summary"`
|
||||
FailureDetail string `json:"Detail,omitempty"`
|
||||
@@ -102,19 +101,17 @@ func initHealState() *allHealState {
|
||||
healSeqMap: make(map[string]*healSequence),
|
||||
}
|
||||
|
||||
go healState.periodicHealSeqsClean()
|
||||
go healState.periodicHealSeqsClean(GlobalContext)
|
||||
|
||||
return healState
|
||||
}
|
||||
|
||||
func (ahs *allHealState) periodicHealSeqsClean() {
|
||||
func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
|
||||
// Launch clean-up routine to remove this heal sequence (after
|
||||
// it ends) from the global state after timeout has elapsed.
|
||||
ticker := time.NewTicker(time.Minute * 5)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
case <-time.After(time.Minute * 5):
|
||||
now := UTCNow()
|
||||
ahs.Lock()
|
||||
for path, h := range ahs.healSeqMap {
|
||||
@@ -123,7 +120,7 @@ func (ahs *allHealState) periodicHealSeqsClean() {
|
||||
}
|
||||
}
|
||||
ahs.Unlock()
|
||||
case <-GlobalServiceDoneCh:
|
||||
case <-ctx.Done():
|
||||
// server could be restarting - need
|
||||
// to exit immediately
|
||||
return
|
||||
@@ -179,7 +176,7 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
|
||||
}
|
||||
|
||||
b, err := json.Marshal(&hsp)
|
||||
return b, toAdminAPIErr(context.Background(), err)
|
||||
return b, toAdminAPIErr(GlobalContext, err)
|
||||
}
|
||||
|
||||
// LaunchNewHealSequence - launches a background routine that performs
|
||||
@@ -198,9 +195,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
|
||||
existsAndLive := false
|
||||
he, exists := ahs.getHealSequence(h.path)
|
||||
if exists {
|
||||
if !he.hasEnded() || len(he.currentStatus.Items) > 0 {
|
||||
existsAndLive = true
|
||||
}
|
||||
existsAndLive = !he.hasEnded()
|
||||
}
|
||||
|
||||
if existsAndLive {
|
||||
@@ -226,8 +221,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
|
||||
// Check if new heal sequence to be started overlaps with any
|
||||
// existing, running sequence
|
||||
for k, hSeq := range ahs.healSeqMap {
|
||||
if !hSeq.hasEnded() && (strings.HasPrefix(k, h.path) ||
|
||||
strings.HasPrefix(h.path, k)) {
|
||||
if !hSeq.hasEnded() && (HasPrefix(k, h.path) || HasPrefix(h.path, k)) {
|
||||
|
||||
errMsg = "The provided heal sequence path overlaps with an existing " +
|
||||
fmt.Sprintf("heal path: %s", k)
|
||||
@@ -273,8 +267,8 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
|
||||
}
|
||||
|
||||
// Take lock to access and update the heal-sequence
|
||||
h.currentStatus.updateLock.Lock()
|
||||
defer h.currentStatus.updateLock.Unlock()
|
||||
h.mutex.Lock()
|
||||
defer h.mutex.Unlock()
|
||||
|
||||
numItems := len(h.currentStatus.Items)
|
||||
|
||||
@@ -285,23 +279,27 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
|
||||
lastResultIndex = h.currentStatus.Items[numItems-1].ResultIndex
|
||||
}
|
||||
|
||||
// After sending status to client, and before relinquishing
|
||||
// the updateLock, reset Item to nil and record the result
|
||||
// index sent to the client.
|
||||
defer func(i int64) {
|
||||
h.lastSentResultIndex = i
|
||||
h.currentStatus.Items = nil
|
||||
}(lastResultIndex)
|
||||
h.lastSentResultIndex = lastResultIndex
|
||||
|
||||
jbytes, err := json.Marshal(h.currentStatus)
|
||||
if err != nil {
|
||||
h.currentStatus.Items = nil
|
||||
|
||||
logger.LogIf(h.ctx, err)
|
||||
return nil, ErrInternalError
|
||||
}
|
||||
|
||||
h.currentStatus.Items = nil
|
||||
|
||||
return jbytes, ErrNone
|
||||
}
|
||||
|
||||
// healSource denotes single entity and heal option.
|
||||
type healSource struct {
|
||||
path string // entity path (format, buckets, objects) to heal
|
||||
opts madmin.HealOpts // optional heal option overrides default setting
|
||||
}
|
||||
|
||||
// healSequence - state for each heal sequence initiated on the
|
||||
// server.
|
||||
type healSequence struct {
|
||||
@@ -311,12 +309,13 @@ type healSequence struct {
|
||||
// path is just pathJoin(bucket, objPrefix)
|
||||
path string
|
||||
|
||||
// List of entities (format, buckets, objects) to heal
|
||||
sourceCh chan string
|
||||
// A channel of entities (format, buckets, objects) to heal
|
||||
sourceCh chan healSource
|
||||
|
||||
// Report healing progress, false if this is a background
|
||||
// healing since currently there is no entity which will
|
||||
// receive realtime healing status
|
||||
// A channel of entities with heal result
|
||||
respCh chan healResult
|
||||
|
||||
// Report healing progress
|
||||
reportProgress bool
|
||||
|
||||
// time at which heal sequence was started
|
||||
@@ -341,33 +340,42 @@ type healSequence struct {
|
||||
// completed
|
||||
traverseAndHealDoneCh chan error
|
||||
|
||||
// channel to signal heal sequence to stop (e.g. from the
|
||||
// heal-stop API)
|
||||
stopSignalCh chan struct{}
|
||||
// canceler to cancel heal sequence.
|
||||
cancelCtx context.CancelFunc
|
||||
|
||||
// the last result index sent to client
|
||||
lastSentResultIndex int64
|
||||
|
||||
// Number of total items scanned
|
||||
scannedItemsCount int64
|
||||
// Number of total items scanned against item type
|
||||
scannedItemsMap map[madmin.HealItemType]int64
|
||||
|
||||
// Number of total items healed against item type
|
||||
healedItemsMap map[madmin.HealItemType]int64
|
||||
|
||||
// Number of total items where healing failed against endpoint and drive state
|
||||
healFailedItemsMap map[string]int64
|
||||
|
||||
// The time of the last scan/heal activity
|
||||
lastHealActivity time.Time
|
||||
|
||||
// Holds the request-info for logging
|
||||
ctx context.Context
|
||||
|
||||
// used to lock this structure as it is concurrently accessed
|
||||
mutex sync.RWMutex
|
||||
}
|
||||
|
||||
// NewHealSequence - creates healSettings, assumes bucket and
|
||||
// objPrefix are already validated.
|
||||
func newHealSequence(bucket, objPrefix, clientAddr string,
|
||||
func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
|
||||
numDisks int, hs madmin.HealOpts, forceStart bool) *healSequence {
|
||||
|
||||
reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
|
||||
reqInfo.AppendTags("prefix", objPrefix)
|
||||
ctx := logger.SetReqInfo(context.Background(), reqInfo)
|
||||
ctx, cancel := context.WithCancel(logger.SetReqInfo(ctx, reqInfo))
|
||||
|
||||
return &healSequence{
|
||||
respCh: make(chan healResult),
|
||||
bucket: bucket,
|
||||
objPrefix: objPrefix,
|
||||
path: pathJoin(bucket, objPrefix),
|
||||
@@ -381,19 +389,92 @@ func newHealSequence(bucket, objPrefix, clientAddr string,
|
||||
Summary: healNotStartedStatus,
|
||||
HealSettings: hs,
|
||||
NumDisks: numDisks,
|
||||
updateLock: &sync.RWMutex{},
|
||||
},
|
||||
traverseAndHealDoneCh: make(chan error),
|
||||
stopSignalCh: make(chan struct{}),
|
||||
cancelCtx: cancel,
|
||||
ctx: ctx,
|
||||
scannedItemsMap: make(map[madmin.HealItemType]int64),
|
||||
healedItemsMap: make(map[madmin.HealItemType]int64),
|
||||
healFailedItemsMap: make(map[string]int64),
|
||||
}
|
||||
}
|
||||
|
||||
// resetHealStatusCounters - reset the healSequence status counters between
|
||||
// each monthly background heal scanning activity.
|
||||
// This is used only in case of Background healing scenario, where
|
||||
// we use a single long running healSequence which reactively heals
|
||||
// objects passed to the SourceCh.
|
||||
func (h *healSequence) resetHealStatusCounters() {
|
||||
h.mutex.Lock()
|
||||
defer h.mutex.Unlock()
|
||||
|
||||
h.currentStatus.Items = []madmin.HealResultItem{}
|
||||
h.lastSentResultIndex = 0
|
||||
h.scannedItemsMap = make(map[madmin.HealItemType]int64)
|
||||
h.healedItemsMap = make(map[madmin.HealItemType]int64)
|
||||
h.healFailedItemsMap = make(map[string]int64)
|
||||
}
|
||||
|
||||
// getScannedItemsCount - returns a count of all scanned items
|
||||
func (h *healSequence) getScannedItemsCount() int64 {
|
||||
var count int64
|
||||
h.mutex.RLock()
|
||||
defer h.mutex.RUnlock()
|
||||
|
||||
for _, v := range h.scannedItemsMap {
|
||||
count = count + v
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// getScannedItemsMap - returns map of all scanned items against type
|
||||
func (h *healSequence) getScannedItemsMap() map[madmin.HealItemType]int64 {
|
||||
h.mutex.RLock()
|
||||
defer h.mutex.RUnlock()
|
||||
|
||||
// Make a copy before returning the value
|
||||
retMap := make(map[madmin.HealItemType]int64, len(h.scannedItemsMap))
|
||||
for k, v := range h.scannedItemsMap {
|
||||
retMap[k] = v
|
||||
}
|
||||
|
||||
return retMap
|
||||
}
|
||||
|
||||
// getHealedItemsMap - returns the map of all healed items against type
|
||||
func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
|
||||
h.mutex.RLock()
|
||||
defer h.mutex.RUnlock()
|
||||
|
||||
// Make a copy before returning the value
|
||||
retMap := make(map[madmin.HealItemType]int64, len(h.healedItemsMap))
|
||||
for k, v := range h.healedItemsMap {
|
||||
retMap[k] = v
|
||||
}
|
||||
|
||||
return retMap
|
||||
}
|
||||
|
||||
// gethealFailedItemsMap - returns map of all items where heal failed against
|
||||
// drive endpoint and status
|
||||
func (h *healSequence) gethealFailedItemsMap() map[string]int64 {
|
||||
h.mutex.RLock()
|
||||
defer h.mutex.RUnlock()
|
||||
|
||||
// Make a copy before returning the value
|
||||
retMap := make(map[string]int64, len(h.healFailedItemsMap))
|
||||
for k, v := range h.healFailedItemsMap {
|
||||
retMap[k] = v
|
||||
}
|
||||
|
||||
return retMap
|
||||
}
|
||||
|
||||
// isQuitting - determines if the heal sequence is quitting (due to an
|
||||
// external signal)
|
||||
func (h *healSequence) isQuitting() bool {
|
||||
select {
|
||||
case <-h.stopSignalCh:
|
||||
case <-h.ctx.Done():
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
@@ -402,19 +483,15 @@ func (h *healSequence) isQuitting() bool {
|
||||
|
||||
// check if the heal sequence has ended
|
||||
func (h *healSequence) hasEnded() bool {
|
||||
h.currentStatus.updateLock.RLock()
|
||||
summary := h.currentStatus.Summary
|
||||
h.currentStatus.updateLock.RUnlock()
|
||||
return summary == healStoppedStatus || summary == healFinishedStatus
|
||||
h.mutex.RLock()
|
||||
ended := len(h.currentStatus.Items) == 0 || h.currentStatus.Summary == healStoppedStatus || h.currentStatus.Summary == healFinishedStatus
|
||||
h.mutex.RUnlock()
|
||||
return ended
|
||||
}
|
||||
|
||||
// stops the heal sequence - safe to call multiple times.
|
||||
func (h *healSequence) stop() {
|
||||
select {
|
||||
case <-h.stopSignalCh:
|
||||
default:
|
||||
close(h.stopSignalCh)
|
||||
}
|
||||
h.cancelCtx()
|
||||
}
|
||||
|
||||
// pushHealResultItem - pushes a heal result item for consumption in
|
||||
@@ -441,29 +518,27 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
|
||||
|
||||
var itemsLen int
|
||||
for {
|
||||
h.currentStatus.updateLock.Lock()
|
||||
h.mutex.Lock()
|
||||
itemsLen = len(h.currentStatus.Items)
|
||||
if itemsLen == maxUnconsumedHealResultItems {
|
||||
// unlock and wait to check again if we can push
|
||||
h.currentStatus.updateLock.Unlock()
|
||||
|
||||
// wait for a second, or quit if an external
|
||||
// stop signal is received or the
|
||||
// unconsumedTimer fires.
|
||||
select {
|
||||
// Check after a second
|
||||
case <-time.After(time.Second):
|
||||
h.mutex.Unlock()
|
||||
continue
|
||||
|
||||
case <-h.stopSignalCh:
|
||||
case <-h.ctx.Done():
|
||||
h.mutex.Unlock()
|
||||
// discard result and return.
|
||||
return errHealPushStopNDiscard
|
||||
return errHealStopSignalled
|
||||
|
||||
// Timeout if no results consumed for too
|
||||
// long.
|
||||
// Timeout if no results consumed for too long.
|
||||
case <-unconsumedTimer.C:
|
||||
h.mutex.Unlock()
|
||||
return errHealIdleTimeout
|
||||
|
||||
}
|
||||
}
|
||||
break
|
||||
@@ -480,13 +555,7 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
|
||||
h.currentStatus.Items = append(h.currentStatus.Items, r)
|
||||
|
||||
// release lock
|
||||
h.currentStatus.updateLock.Unlock()
|
||||
|
||||
// This is a "safe" point for the heal sequence to quit if
|
||||
// signaled externally.
|
||||
if h.isQuitting() {
|
||||
return errHealStopSignalled
|
||||
}
|
||||
h.mutex.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -500,10 +569,10 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
|
||||
// the heal-sequence.
|
||||
func (h *healSequence) healSequenceStart() {
|
||||
// Set status as running
|
||||
h.currentStatus.updateLock.Lock()
|
||||
h.mutex.Lock()
|
||||
h.currentStatus.Summary = healRunningStatus
|
||||
h.currentStatus.StartTime = UTCNow()
|
||||
h.currentStatus.updateLock.Unlock()
|
||||
h.mutex.Unlock()
|
||||
|
||||
if h.sourceCh == nil {
|
||||
go h.traverseAndHeal()
|
||||
@@ -513,25 +582,27 @@ func (h *healSequence) healSequenceStart() {
|
||||
|
||||
select {
|
||||
case err, ok := <-h.traverseAndHealDoneCh:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
h.mutex.Lock()
|
||||
h.endTime = UTCNow()
|
||||
h.currentStatus.updateLock.Lock()
|
||||
defer h.currentStatus.updateLock.Unlock()
|
||||
// Heal traversal is complete.
|
||||
if ok {
|
||||
if err == nil {
|
||||
// heal traversal succeeded.
|
||||
h.currentStatus.Summary = healFinishedStatus
|
||||
} else {
|
||||
// heal traversal had an error.
|
||||
h.currentStatus.Summary = healStoppedStatus
|
||||
h.currentStatus.FailureDetail = err.Error()
|
||||
} else {
|
||||
// heal traversal succeeded.
|
||||
h.currentStatus.Summary = healFinishedStatus
|
||||
}
|
||||
|
||||
case <-h.stopSignalCh:
|
||||
h.mutex.Unlock()
|
||||
case <-h.ctx.Done():
|
||||
h.mutex.Lock()
|
||||
h.endTime = UTCNow()
|
||||
h.currentStatus.updateLock.Lock()
|
||||
h.currentStatus.Summary = healStoppedStatus
|
||||
h.currentStatus.FailureDetail = errHealStopSignalled.Error()
|
||||
h.currentStatus.updateLock.Unlock()
|
||||
h.mutex.Unlock()
|
||||
|
||||
// drain traverse channel so the traversal
|
||||
// go-routine does not leak.
|
||||
@@ -544,76 +615,108 @@ func (h *healSequence) healSequenceStart() {
|
||||
}
|
||||
}
|
||||
|
||||
func (h *healSequence) queueHealTask(path string, healType madmin.HealItemType) error {
|
||||
var respCh = make(chan healResult)
|
||||
defer close(respCh)
|
||||
func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
|
||||
// Send heal request
|
||||
globalBackgroundHealing.queueHealTask(healTask{path: path, responseCh: respCh, opts: h.settings})
|
||||
// Wait for answer and push result to the client
|
||||
res := <-respCh
|
||||
if !h.reportProgress {
|
||||
return nil
|
||||
task := healTask{
|
||||
path: source.path,
|
||||
opts: h.settings,
|
||||
responseCh: h.respCh,
|
||||
}
|
||||
res.result.Type = healType
|
||||
if res.err != nil {
|
||||
// Object might have been deleted, by the time heal
|
||||
// was attempted, we should ignore this object and return success.
|
||||
if isErrObjectNotFound(res.err) {
|
||||
return nil
|
||||
}
|
||||
// Only report object error
|
||||
if healType != madmin.HealItemObject {
|
||||
if !source.opts.Equal(h.settings) {
|
||||
task.opts = source.opts
|
||||
}
|
||||
globalBackgroundHealRoutine.queueHealTask(task)
|
||||
|
||||
select {
|
||||
case res := <-h.respCh:
|
||||
if !h.reportProgress {
|
||||
// Object might have been deleted, by the time heal
|
||||
// was attempted, we should ignore this object and
|
||||
// return success.
|
||||
if isErrObjectNotFound(res.err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
h.mutex.Lock()
|
||||
defer h.mutex.Unlock()
|
||||
|
||||
// Progress is not reported in case of background heal processing.
|
||||
// Instead we increment relevant counter based on the heal result
|
||||
// for prometheus reporting.
|
||||
if res.err != nil {
|
||||
for _, d := range res.result.After.Drives {
|
||||
// For failed items we report the endpoint and drive state
|
||||
// This will help users take corrective actions for drives
|
||||
h.healFailedItemsMap[d.Endpoint+","+d.State]++
|
||||
}
|
||||
} else {
|
||||
// Only object type reported for successful healing
|
||||
h.healedItemsMap[res.result.Type]++
|
||||
}
|
||||
|
||||
// Report caller of any failure
|
||||
return res.err
|
||||
}
|
||||
res.result.Detail = res.err.Error()
|
||||
res.result.Type = healType
|
||||
if res.err != nil {
|
||||
// Object might have been deleted, by the time heal
|
||||
// was attempted, we should ignore this object and return success.
|
||||
if isErrObjectNotFound(res.err) {
|
||||
return nil
|
||||
}
|
||||
// Only report object error
|
||||
if healType != madmin.HealItemObject {
|
||||
return res.err
|
||||
}
|
||||
res.result.Detail = res.err.Error()
|
||||
}
|
||||
return h.pushHealResultItem(res.result)
|
||||
case <-h.ctx.Done():
|
||||
return nil
|
||||
}
|
||||
return h.pushHealResultItem(res.result)
|
||||
}
|
||||
|
||||
func (h *healSequence) healItemsFromSourceCh() error {
|
||||
h.lastHealActivity = UTCNow()
|
||||
|
||||
// Start healing the config prefix.
|
||||
if err := h.healMinioSysMeta(minioConfigPrefix)(); err != nil {
|
||||
bucketsOnly := true // heal buckets only, not objects.
|
||||
if err := h.healItems(bucketsOnly); err != nil {
|
||||
logger.LogIf(h.ctx, err)
|
||||
}
|
||||
|
||||
// Start healing the bucket config prefix.
|
||||
if err := h.healMinioSysMeta(bucketConfigPrefix)(); err != nil {
|
||||
logger.LogIf(h.ctx, err)
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case source, ok := <-h.sourceCh:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
var itemType madmin.HealItemType
|
||||
switch {
|
||||
case source.path == nopHeal:
|
||||
continue
|
||||
case source.path == SlashSeparator:
|
||||
itemType = madmin.HealItemMetadata
|
||||
case !strings.Contains(source.path, SlashSeparator):
|
||||
itemType = madmin.HealItemBucket
|
||||
default:
|
||||
itemType = madmin.HealItemObject
|
||||
}
|
||||
|
||||
for path := range h.sourceCh {
|
||||
if err := h.queueHealTask(source, itemType); err != nil {
|
||||
logger.LogIf(h.ctx, err)
|
||||
}
|
||||
|
||||
var itemType madmin.HealItemType
|
||||
switch {
|
||||
case path == SlashSeparator:
|
||||
itemType = madmin.HealItemMetadata
|
||||
case !strings.Contains(path, SlashSeparator):
|
||||
itemType = madmin.HealItemBucket
|
||||
default:
|
||||
itemType = madmin.HealItemObject
|
||||
h.scannedItemsMap[itemType]++
|
||||
h.lastHealActivity = UTCNow()
|
||||
case <-h.ctx.Done():
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := h.queueHealTask(path, itemType); err != nil {
|
||||
logger.LogIf(h.ctx, err)
|
||||
}
|
||||
|
||||
h.scannedItemsCount++
|
||||
h.lastHealActivity = UTCNow()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *healSequence) healFromSourceCh() {
|
||||
if err := h.healItemsFromSourceCh(); err != nil {
|
||||
h.traverseAndHealDoneCh <- err
|
||||
}
|
||||
close(h.traverseAndHealDoneCh)
|
||||
h.healItemsFromSourceCh()
|
||||
}
|
||||
|
||||
func (h *healSequence) healItems() error {
|
||||
func (h *healSequence) healItems(bucketsOnly bool) error {
|
||||
// Start with format healing
|
||||
if err := h.healDiskFormat(); err != nil {
|
||||
return err
|
||||
@@ -630,7 +733,7 @@ func (h *healSequence) healItems() error {
|
||||
}
|
||||
|
||||
// Heal buckets and objects
|
||||
return h.healBuckets()
|
||||
return h.healBuckets(bucketsOnly)
|
||||
}
|
||||
|
||||
// traverseAndHeal - traverses on-disk data and performs healing
|
||||
@@ -641,13 +744,8 @@ func (h *healSequence) healItems() error {
|
||||
// has to wait until a safe point is reached, such as between scanning
|
||||
// two objects.
|
||||
func (h *healSequence) traverseAndHeal() {
|
||||
if err := h.healItems(); err != nil {
|
||||
if h.isQuitting() {
|
||||
err = errHealStopSignalled
|
||||
}
|
||||
h.traverseAndHealDoneCh <- err
|
||||
}
|
||||
|
||||
bucketsOnly := false // Heals buckets and objects also.
|
||||
h.traverseAndHealDoneCh <- h.healItems(bucketsOnly)
|
||||
close(h.traverseAndHealDoneCh)
|
||||
}
|
||||
|
||||
@@ -656,7 +754,7 @@ func (h *healSequence) traverseAndHeal() {
|
||||
func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
|
||||
return func() error {
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
@@ -664,12 +762,12 @@ func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
|
||||
// NOTE: Healing on meta is run regardless
|
||||
// of any bucket being selected, this is to ensure that
|
||||
// meta are always upto date and correct.
|
||||
return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, func(bucket string, object string) error {
|
||||
return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket string, object string) error {
|
||||
if h.isQuitting() {
|
||||
return errHealStopSignalled
|
||||
}
|
||||
|
||||
herr := h.queueHealTask(pathJoin(bucket, object), madmin.HealItemBucketMetadata)
|
||||
herr := h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemBucketMetadata)
|
||||
// Object might have been deleted, by the time heal
|
||||
// was attempted we ignore this object an move on.
|
||||
if isErrObjectNotFound(herr) {
|
||||
@@ -688,27 +786,27 @@ func (h *healSequence) healDiskFormat() error {
|
||||
}
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
return h.queueHealTask(SlashSeparator, madmin.HealItemMetadata)
|
||||
return h.queueHealTask(healSource{path: SlashSeparator}, madmin.HealItemMetadata)
|
||||
}
|
||||
|
||||
// healBuckets - check for all buckets heal or just particular bucket.
|
||||
func (h *healSequence) healBuckets() error {
|
||||
func (h *healSequence) healBuckets(bucketsOnly bool) error {
|
||||
if h.isQuitting() {
|
||||
return errHealStopSignalled
|
||||
}
|
||||
|
||||
// 1. If a bucket was specified, heal only the bucket.
|
||||
if h.bucket != "" {
|
||||
return h.healBucket(h.bucket)
|
||||
return h.healBucket(h.bucket, bucketsOnly)
|
||||
}
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
@@ -719,7 +817,7 @@ func (h *healSequence) healBuckets() error {
|
||||
}
|
||||
|
||||
for _, bucket := range buckets {
|
||||
if err = h.healBucket(bucket.Name); err != nil {
|
||||
if err = h.healBucket(bucket.Name, bucketsOnly); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -728,17 +826,21 @@ func (h *healSequence) healBuckets() error {
|
||||
}
|
||||
|
||||
// healBucket - traverses and heals given bucket
|
||||
func (h *healSequence) healBucket(bucket string) error {
|
||||
func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
if err := h.queueHealTask(bucket, madmin.HealItemBucket); err != nil {
|
||||
if err := h.queueHealTask(healSource{path: bucket}, madmin.HealItemBucket); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if bucketsOnly {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !h.settings.Recursive {
|
||||
if h.objPrefix != "" {
|
||||
// Check if an object named as the objPrefix exists,
|
||||
@@ -754,7 +856,7 @@ func (h *healSequence) healBucket(bucket string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := objectAPI.HealObjects(h.ctx, bucket, h.objPrefix, h.healObject); err != nil {
|
||||
if err := objectAPI.HealObjects(h.ctx, bucket, h.objPrefix, h.settings, h.healObject); err != nil {
|
||||
return errFnHealFromAPIErr(h.ctx, err)
|
||||
}
|
||||
return nil
|
||||
@@ -762,15 +864,15 @@ func (h *healSequence) healBucket(bucket string) error {
|
||||
|
||||
// healObject - heal the given object and record result
|
||||
func (h *healSequence) healObject(bucket, object string) error {
|
||||
if h.isQuitting() {
|
||||
return errHealStopSignalled
|
||||
}
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
return h.queueHealTask(pathJoin(bucket, object), madmin.HealItemObject)
|
||||
if h.isQuitting() {
|
||||
return errHealStopSignalled
|
||||
}
|
||||
|
||||
return h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemObject)
|
||||
}
|
||||
|
||||
118
cmd/admin-quota-handlers.go
Normal file
118
cmd/admin-quota-handlers.go
Normal file
@@ -0,0 +1,118 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
)
|
||||
|
||||
const (
|
||||
bucketQuotaConfigFile = "quota.json"
|
||||
)
|
||||
|
||||
// PutBucketQuotaConfigHandler - PUT Bucket quota configuration.
|
||||
// ----------
|
||||
// Places a quota configuration on the specified bucket. The quota
|
||||
// specified in the quota configuration will be applied by default
|
||||
// to enforce total quota for the specified bucket.
|
||||
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "PutBucketQuotaConfig")
|
||||
|
||||
defer logger.AuditLog(w, r, "PutBucketQuotaConfig", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
|
||||
if objectAPI == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
// Turn off quota commands if data usage info is unavailable.
|
||||
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminBucketQuotaDisabled), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = parseBucketQuota(bucket, data); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if err = globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, data); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Write success response.
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
}
|
||||
|
||||
// GetBucketQuotaConfigHandler - gets bucket quota configuration
|
||||
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetBucketQuotaConfig")
|
||||
|
||||
defer logger.AuditLog(w, r, "GetBucketQuotaConfig", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
|
||||
if objectAPI == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
config, err := globalBucketMetadataSys.GetQuotaConfig(bucket)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
configData, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Write success response.
|
||||
writeSuccessResponseJSON(w, configData)
|
||||
}
|
||||
@@ -20,127 +20,203 @@ import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
)
|
||||
|
||||
const (
|
||||
adminAPIPathPrefix = "/minio/admin"
|
||||
adminPathPrefix = minioReservedBucketPath + "/admin"
|
||||
adminAPIVersionV2 = madmin.AdminAPIVersionV2
|
||||
adminAPIVersion = madmin.AdminAPIVersion
|
||||
adminAPIVersionPrefix = SlashSeparator + adminAPIVersion
|
||||
adminAPIVersionV2Prefix = SlashSeparator + adminAPIVersionV2
|
||||
)
|
||||
|
||||
// adminAPIHandlers provides HTTP handlers for MinIO admin API.
|
||||
type adminAPIHandlers struct {
|
||||
}
|
||||
type adminAPIHandlers struct{}
|
||||
|
||||
// registerAdminRouter - Add handler functions for each service REST API routes.
|
||||
func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) {
|
||||
|
||||
adminAPI := adminAPIHandlers{}
|
||||
// Admin router
|
||||
adminRouter := router.PathPrefix(adminAPIPathPrefix).Subrouter()
|
||||
|
||||
// Version handler
|
||||
adminV1Router := adminRouter.PathPrefix("/v1").Subrouter()
|
||||
adminRouter := router.PathPrefix(adminPathPrefix).Subrouter()
|
||||
|
||||
/// Service operations
|
||||
|
||||
// Restart and stop MinIO service.
|
||||
adminV1Router.Methods(http.MethodPost).Path("/service").HandlerFunc(httpTraceAll(adminAPI.ServiceActionHandler)).Queries("action", "{action:.*}")
|
||||
// Update MinIO servers.
|
||||
adminV1Router.Methods(http.MethodPost).Path("/update").HandlerFunc(httpTraceAll(adminAPI.ServerUpdateHandler)).Queries("updateURL", "{updateURL:.*}")
|
||||
|
||||
// Info operations
|
||||
adminV1Router.Methods(http.MethodGet).Path("/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
|
||||
|
||||
if globalIsDistXL || globalIsXL {
|
||||
/// Heal operations
|
||||
|
||||
// Heal processing endpoint.
|
||||
adminV1Router.Methods(http.MethodPost).Path("/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
adminV1Router.Methods(http.MethodPost).Path("/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
|
||||
adminV1Router.Methods(http.MethodPost).Path("/background-heal/status").HandlerFunc(httpTraceAll(adminAPI.BackgroundHealStatusHandler))
|
||||
|
||||
/// Health operations
|
||||
|
||||
}
|
||||
// Performance command - return performance details based on input type
|
||||
adminV1Router.Methods(http.MethodGet).Path("/performance").HandlerFunc(httpTraceAll(adminAPI.PerfInfoHandler)).Queries("perfType", "{perfType:.*}")
|
||||
|
||||
// Profiling operations
|
||||
adminV1Router.Methods(http.MethodPost).Path("/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
|
||||
Queries("profilerType", "{profilerType:.*}")
|
||||
adminV1Router.Methods(http.MethodGet).Path("/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))
|
||||
|
||||
/// Config operations
|
||||
if enableConfigOps {
|
||||
// Get config
|
||||
adminV1Router.Methods(http.MethodGet).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
|
||||
// Set config
|
||||
adminV1Router.Methods(http.MethodPut).Path("/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
|
||||
adminVersions := []string{
|
||||
adminAPIVersionPrefix,
|
||||
adminAPIVersionV2Prefix,
|
||||
}
|
||||
|
||||
if enableIAMOps {
|
||||
// -- IAM APIs --
|
||||
for _, adminVersion := range adminVersions {
|
||||
// Restart and stop MinIO service.
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/service").HandlerFunc(httpTraceAll(adminAPI.ServiceHandler)).Queries("action", "{action:.*}")
|
||||
// Update MinIO servers.
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update").HandlerFunc(httpTraceAll(adminAPI.ServerUpdateHandler)).Queries("updateURL", "{updateURL:.*}")
|
||||
|
||||
// Add policy IAM
|
||||
adminV1Router.Methods(http.MethodPut).Path("/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name",
|
||||
"{name:.*}")
|
||||
// Info operations
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
|
||||
|
||||
// Add user IAM
|
||||
adminV1Router.Methods(http.MethodPut).Path("/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
|
||||
adminV1Router.Methods(http.MethodPut).Path("/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).
|
||||
Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
|
||||
// StorageInfo operations
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(httpTraceAll(adminAPI.StorageInfoHandler))
|
||||
// DataUsageInfo operations
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(httpTraceAll(adminAPI.DataUsageInfoHandler))
|
||||
|
||||
// Info policy IAM
|
||||
adminV1Router.Methods(http.MethodGet).Path("/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
|
||||
if globalIsDistXL || globalIsXL {
|
||||
/// Heal operations
|
||||
|
||||
// Remove policy IAM
|
||||
adminV1Router.Methods(http.MethodDelete).Path("/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
|
||||
// Heal processing endpoint.
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
|
||||
|
||||
// Set user or group policy
|
||||
adminV1Router.Methods(http.MethodPut).Path("/set-user-or-group-policy").
|
||||
HandlerFunc(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup)).
|
||||
Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(httpTraceAll(adminAPI.BackgroundHealStatusHandler))
|
||||
|
||||
// Remove user IAM
|
||||
adminV1Router.Methods(http.MethodDelete).Path("/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
|
||||
/// Health operations
|
||||
|
||||
// List users
|
||||
adminV1Router.Methods(http.MethodGet).Path("/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))
|
||||
}
|
||||
|
||||
// User info
|
||||
adminV1Router.Methods(http.MethodGet).Path("/user-info").HandlerFunc(httpTraceHdrs(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")
|
||||
// Profiling operations
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
|
||||
Queries("profilerType", "{profilerType:.*}")
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))
|
||||
|
||||
// Add/Remove members from group
|
||||
adminV1Router.Methods(http.MethodPut).Path("/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers))
|
||||
// Config KV operations.
|
||||
if enableConfigOps {
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKVHandler)).Queries("key", "{key:.*}")
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/set-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKVHandler))
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/del-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.DelConfigKVHandler))
|
||||
}
|
||||
|
||||
// Get Group
|
||||
adminV1Router.Methods(http.MethodGet).Path("/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}")
|
||||
// Enable config help in all modes.
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/help-config-kv").HandlerFunc(httpTraceAll(adminAPI.HelpConfigKVHandler)).Queries("subSys", "{subSys:.*}", "key", "{key:.*}")
|
||||
|
||||
// List Groups
|
||||
adminV1Router.Methods(http.MethodGet).Path("/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups))
|
||||
// Config KV history operations.
|
||||
if enableConfigOps {
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-config-history-kv").HandlerFunc(httpTraceAll(adminAPI.ListConfigHistoryKVHandler)).Queries("count", "{count:[0-9]+}")
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/clear-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.ClearConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/restore-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
|
||||
}
|
||||
|
||||
// Set Group Status
|
||||
adminV1Router.Methods(http.MethodPut).Path("/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
|
||||
/// Config import/export bulk operations
|
||||
if enableConfigOps {
|
||||
// Get config
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
|
||||
// Set config
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
|
||||
}
|
||||
|
||||
// List policies
|
||||
adminV1Router.Methods(http.MethodGet).Path("/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
|
||||
if enableIAMOps {
|
||||
// -- IAM APIs --
|
||||
|
||||
// Add policy IAM
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")
|
||||
|
||||
// Add user IAM
|
||||
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountusageinfo").HandlerFunc(httpTraceAll(adminAPI.AccountUsageInfoHandler))
|
||||
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
|
||||
|
||||
// Service accounts ops
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/add-service-account").HandlerFunc(httpTraceHdrs(adminAPI.AddServiceAccount))
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-service-accounts").HandlerFunc(httpTraceHdrs(adminAPI.ListServiceAccounts))
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/delete-service-account").HandlerFunc(httpTraceHdrs(adminAPI.DeleteServiceAccount)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
if adminVersion == adminAPIVersionV2Prefix {
|
||||
// Info policy IAM v2
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicyV2)).Queries("name", "{name:.*}")
|
||||
|
||||
// List policies v2
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPoliciesV2))
|
||||
} else {
|
||||
// Info policy IAM latest
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
|
||||
|
||||
// List policies latest
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
|
||||
}
|
||||
|
||||
// Remove policy IAM
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
|
||||
|
||||
// Set user or group policy
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-or-group-policy").
|
||||
HandlerFunc(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup)).
|
||||
Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")
|
||||
|
||||
// Remove user IAM
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
// List users
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))
|
||||
|
||||
// User info
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/user-info").HandlerFunc(httpTraceHdrs(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
// Add/Remove members from group
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers))
|
||||
|
||||
// Get Group
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}")
|
||||
|
||||
// List Groups
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups))
|
||||
|
||||
// Set Group Status
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
|
||||
}
|
||||
|
||||
// Quota operations
|
||||
if globalIsXL || globalIsDistXL {
|
||||
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
|
||||
// GetBucketQuotaConfig
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
|
||||
// PutBucketQuotaConfig
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
|
||||
}
|
||||
}
|
||||
|
||||
// -- Top APIs --
|
||||
// Top locks
|
||||
if globalIsDistXL {
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
|
||||
}
|
||||
|
||||
// HTTP Trace
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/trace").HandlerFunc(adminAPI.TraceHandler)
|
||||
|
||||
// Console Logs
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/log").HandlerFunc(httpTraceAll(adminAPI.ConsoleLogHandler))
|
||||
|
||||
// -- KMS APIs --
|
||||
//
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))
|
||||
|
||||
if !globalIsGateway {
|
||||
// -- OBD API --
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/obdinfo").
|
||||
HandlerFunc(httpTraceHdrs(adminAPI.OBDInfoHandler)).
|
||||
Queries("perfdrive", "{perfdrive:true|false}",
|
||||
"perfnet", "{perfnet:true|false}",
|
||||
"minioinfo", "{minioinfo:true|false}",
|
||||
"minioconfig", "{minioconfig:true|false}",
|
||||
"syscpu", "{syscpu:true|false}",
|
||||
"sysdiskhw", "{sysdiskhw:true|false}",
|
||||
"sysosinfo", "{sysosinfo:true|false}",
|
||||
"sysmem", "{sysmem:true|false}",
|
||||
"sysprocess", "{sysprocess:true|false}",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// -- Top APIs --
|
||||
// Top locks
|
||||
adminV1Router.Methods(http.MethodGet).Path("/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
|
||||
|
||||
// HTTP Trace
|
||||
adminV1Router.Methods(http.MethodGet).Path("/trace").HandlerFunc(adminAPI.TraceHandler)
|
||||
|
||||
// Console Logs
|
||||
adminV1Router.Methods(http.MethodGet).Path("/log").HandlerFunc(httpTraceAll(adminAPI.ConsoleLogHandler))
|
||||
|
||||
// -- KMS APIs --
|
||||
//
|
||||
adminV1Router.Methods(http.MethodGet).Path("/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))
|
||||
|
||||
// If none of the routes match, return error.
|
||||
adminV1Router.NotFoundHandler = http.HandlerFunc(httpTraceHdrs(notFoundHandlerJSON))
|
||||
// If none of the routes match add default error handler routes
|
||||
adminRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
|
||||
adminRouter.MethodNotAllowedHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
|
||||
}
|
||||
|
||||
@@ -20,95 +20,67 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/minio/minio-go/pkg/set"
|
||||
"github.com/minio/minio/pkg/cpu"
|
||||
"github.com/minio/minio/pkg/disk"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
"github.com/minio/minio/pkg/mem"
|
||||
)
|
||||
|
||||
// getLocalMemUsage - returns ServerMemUsageInfo for only the
|
||||
// getLocalServerProperty - returns madmin.ServerProperties for only the
|
||||
// local endpoints from given list of endpoints
|
||||
func getLocalMemUsage(endpoints EndpointList, r *http.Request) ServerMemUsageInfo {
|
||||
var memUsages []mem.Usage
|
||||
var historicUsages []mem.Usage
|
||||
seenHosts := set.NewStringSet()
|
||||
for _, endpoint := range endpoints {
|
||||
if seenHosts.Contains(endpoint.Host) {
|
||||
continue
|
||||
}
|
||||
seenHosts.Add(endpoint.Host)
|
||||
|
||||
// Only proceed for local endpoints
|
||||
if endpoint.IsLocal {
|
||||
memUsages = append(memUsages, mem.GetUsage())
|
||||
historicUsages = append(historicUsages, mem.GetHistoricUsage())
|
||||
}
|
||||
}
|
||||
func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin.ServerProperties {
|
||||
var disks []madmin.Disk
|
||||
addr := r.Host
|
||||
if globalIsDistXL {
|
||||
addr = GetLocalPeer(endpoints)
|
||||
addr = GetLocalPeer(endpointZones)
|
||||
}
|
||||
return ServerMemUsageInfo{
|
||||
Addr: addr,
|
||||
Usage: memUsages,
|
||||
HistoricUsage: historicUsages,
|
||||
}
|
||||
}
|
||||
|
||||
// getLocalCPULoad - returns ServerCPULoadInfo for only the
|
||||
// local endpoints from given list of endpoints
|
||||
func getLocalCPULoad(endpoints EndpointList, r *http.Request) ServerCPULoadInfo {
|
||||
var cpuLoads []cpu.Load
|
||||
var historicLoads []cpu.Load
|
||||
seenHosts := set.NewStringSet()
|
||||
for _, endpoint := range endpoints {
|
||||
if seenHosts.Contains(endpoint.Host) {
|
||||
continue
|
||||
}
|
||||
seenHosts.Add(endpoint.Host)
|
||||
|
||||
// Only proceed for local endpoints
|
||||
if endpoint.IsLocal {
|
||||
cpuLoads = append(cpuLoads, cpu.GetLoad())
|
||||
historicLoads = append(historicLoads, cpu.GetHistoricLoad())
|
||||
}
|
||||
}
|
||||
addr := r.Host
|
||||
if globalIsDistXL {
|
||||
addr = GetLocalPeer(endpoints)
|
||||
}
|
||||
return ServerCPULoadInfo{
|
||||
Addr: addr,
|
||||
Load: cpuLoads,
|
||||
HistoricLoad: historicLoads,
|
||||
}
|
||||
}
|
||||
|
||||
// getLocalDrivesPerf - returns ServerDrivesPerfInfo for only the
|
||||
// local endpoints from given list of endpoints
|
||||
func getLocalDrivesPerf(endpoints EndpointList, size int64, r *http.Request) madmin.ServerDrivesPerfInfo {
|
||||
var dps []disk.Performance
|
||||
for _, endpoint := range endpoints {
|
||||
// Only proceed for local endpoints
|
||||
if endpoint.IsLocal {
|
||||
if _, err := os.Stat(endpoint.Path); err != nil {
|
||||
// Since this drive is not available, add relevant details and proceed
|
||||
dps = append(dps, disk.Performance{Path: endpoint.Path, Error: err.Error()})
|
||||
continue
|
||||
network := make(map[string]string)
|
||||
for _, ep := range endpointZones {
|
||||
for _, endpoint := range ep.Endpoints {
|
||||
nodeName := endpoint.Host
|
||||
if nodeName == "" {
|
||||
nodeName = r.Host
|
||||
}
|
||||
if endpoint.IsLocal {
|
||||
// Only proceed for local endpoints
|
||||
network[nodeName] = "online"
|
||||
var di = madmin.Disk{
|
||||
DrivePath: endpoint.Path,
|
||||
}
|
||||
diInfo, err := disk.GetInfo(endpoint.Path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) || isSysErrPathNotFound(err) {
|
||||
di.State = madmin.DriveStateMissing
|
||||
} else {
|
||||
di.State = madmin.DriveStateCorrupt
|
||||
}
|
||||
} else {
|
||||
di.State = madmin.DriveStateOk
|
||||
di.DrivePath = endpoint.Path
|
||||
di.TotalSpace = diInfo.Total
|
||||
di.UsedSpace = diInfo.Total - diInfo.Free
|
||||
di.Utilization = float64((diInfo.Total - diInfo.Free) / diInfo.Total * 100)
|
||||
}
|
||||
disks = append(disks, di)
|
||||
} else {
|
||||
_, present := network[nodeName]
|
||||
if !present {
|
||||
err := IsServerResolvable(endpoint)
|
||||
if err == nil {
|
||||
network[nodeName] = "online"
|
||||
} else {
|
||||
network[nodeName] = "offline"
|
||||
}
|
||||
}
|
||||
}
|
||||
dp := disk.GetPerformance(pathJoin(endpoint.Path, minioMetaTmpBucket, mustGetUUID()), size)
|
||||
dp.Path = endpoint.Path
|
||||
dps = append(dps, dp)
|
||||
}
|
||||
}
|
||||
addr := r.Host
|
||||
if globalIsDistXL {
|
||||
addr = GetLocalPeer(endpoints)
|
||||
}
|
||||
return madmin.ServerDrivesPerfInfo{
|
||||
Addr: addr,
|
||||
Perf: dps,
|
||||
Size: size,
|
||||
|
||||
return madmin.ServerProperties{
|
||||
State: "ok",
|
||||
Endpoint: addr,
|
||||
Uptime: UTCNow().Unix() - globalBootTime.Unix(),
|
||||
Version: Version,
|
||||
CommitID: CommitID,
|
||||
Network: network,
|
||||
Disks: disks,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ type ObjectIdentifier struct {
|
||||
}
|
||||
|
||||
// createBucketConfiguration container for bucket configuration request from client.
|
||||
// Used for parsing the location from the request body for MakeBucketbucket.
|
||||
// Used for parsing the location from the request body for Makebucket.
|
||||
type createBucketLocationConfiguration struct {
|
||||
XMLName xml.Name `xml:"CreateBucketConfiguration" json:"-"`
|
||||
Location string `xml:"LocationConstraint"`
|
||||
|
||||
@@ -21,17 +21,21 @@ import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/aliyun/aliyun-oss-go-sdk/oss"
|
||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||
"google.golang.org/api/googleapi"
|
||||
|
||||
minio "github.com/minio/minio-go/v6"
|
||||
"github.com/minio/minio-go/v6/pkg/tags"
|
||||
"github.com/minio/minio/cmd/config/etcd/dns"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/dns"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
)
|
||||
@@ -90,9 +94,15 @@ const (
|
||||
ErrMissingContentLength
|
||||
ErrMissingContentMD5
|
||||
ErrMissingRequestBodyError
|
||||
ErrMissingSecurityHeader
|
||||
ErrNoSuchBucket
|
||||
ErrNoSuchBucketPolicy
|
||||
ErrNoSuchBucketLifecycle
|
||||
ErrNoSuchLifecycleConfiguration
|
||||
ErrNoSuchBucketSSEConfig
|
||||
ErrNoSuchCORSConfiguration
|
||||
ErrNoSuchWebsiteConfiguration
|
||||
ErrReplicationConfigurationNotFoundError
|
||||
ErrNoSuchKey
|
||||
ErrNoSuchUpload
|
||||
ErrNoSuchVersion
|
||||
@@ -114,7 +124,8 @@ const (
|
||||
ErrMissingCredTag
|
||||
ErrCredMalformed
|
||||
ErrInvalidRegion
|
||||
ErrInvalidService
|
||||
ErrInvalidServiceS3
|
||||
ErrInvalidServiceSTS
|
||||
ErrInvalidRequestVersion
|
||||
ErrMissingSignTag
|
||||
ErrMissingSignHeadersTag
|
||||
@@ -141,6 +152,17 @@ const (
|
||||
ErrInvalidPrefixMarker
|
||||
ErrBadRequest
|
||||
ErrKeyTooLongError
|
||||
ErrInvalidBucketObjectLockConfiguration
|
||||
ErrObjectLockConfigurationNotFound
|
||||
ErrObjectLockConfigurationNotAllowed
|
||||
ErrNoSuchObjectLockConfiguration
|
||||
ErrObjectLocked
|
||||
ErrInvalidRetentionDate
|
||||
ErrPastObjectLockRetainDate
|
||||
ErrUnknownWORMModeDirective
|
||||
ErrBucketTaggingNotFound
|
||||
ErrObjectLockInvalidHeaders
|
||||
ErrInvalidTagDirective
|
||||
// Add new error codes here.
|
||||
|
||||
// SSE-S3 related API errors
|
||||
@@ -193,6 +215,7 @@ const (
|
||||
ErrInvalidResourceName
|
||||
ErrServerNotInitialized
|
||||
ErrOperationTimedOut
|
||||
ErrOperationMaxedOut
|
||||
ErrInvalidRequest
|
||||
// MinIO storage class error codes
|
||||
ErrInvalidStorageClass
|
||||
@@ -216,6 +239,10 @@ const (
|
||||
ErrAdminCredentialsMismatch
|
||||
ErrInsecureClientRequest
|
||||
ErrObjectTampered
|
||||
// Bucket Quota error codes
|
||||
ErrAdminBucketQuotaExceeded
|
||||
ErrAdminNoSuchQuotaConfiguration
|
||||
ErrAdminBucketQuotaDisabled
|
||||
|
||||
ErrHealNotImplemented
|
||||
ErrHealNoSuchProcess
|
||||
@@ -315,19 +342,28 @@ const (
|
||||
ErrAdminProfilerNotEnabled
|
||||
ErrInvalidDecompressedSize
|
||||
ErrAddUserInvalidArgument
|
||||
ErrAdminAccountNotEligible
|
||||
ErrServiceAccountNotFound
|
||||
ErrPostPolicyConditionInvalidFormat
|
||||
)
|
||||
|
||||
type errorCodeMap map[APIErrorCode]APIError
|
||||
|
||||
func (e errorCodeMap) ToAPIErr(errCode APIErrorCode) APIError {
|
||||
func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError {
|
||||
apiErr, ok := e[errCode]
|
||||
if !ok {
|
||||
return e[ErrInternalError]
|
||||
apiErr = e[ErrInternalError]
|
||||
}
|
||||
if err != nil {
|
||||
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
|
||||
}
|
||||
return apiErr
|
||||
}
|
||||
|
||||
func (e errorCodeMap) ToAPIErr(errCode APIErrorCode) APIError {
|
||||
return e.ToAPIErrWithErr(errCode, nil)
|
||||
}
|
||||
|
||||
// error code to APIError structure, these fields carry respective
|
||||
// descriptions for all the error responses.
|
||||
var errorCodes = errorCodeMap{
|
||||
@@ -423,7 +459,7 @@ var errorCodes = errorCodeMap{
|
||||
},
|
||||
ErrInvalidAccessKeyID: {
|
||||
Code: "InvalidAccessKeyId",
|
||||
Description: "The access key ID you provided does not exist in our records.",
|
||||
Description: "The Access Key Id you provided does not exist in our records.",
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
},
|
||||
ErrInvalidBucketName: {
|
||||
@@ -456,6 +492,11 @@ var errorCodes = errorCodeMap{
|
||||
Description: "Missing required header for this request: Content-Md5.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrMissingSecurityHeader: {
|
||||
Code: "MissingSecurityHeader",
|
||||
Description: "Your request was missing a required header",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrMissingRequestBodyError: {
|
||||
Code: "MissingRequestBodyError",
|
||||
Description: "Request body is empty.",
|
||||
@@ -476,6 +517,16 @@ var errorCodes = errorCodeMap{
|
||||
Description: "The bucket lifecycle configuration does not exist",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrNoSuchLifecycleConfiguration: {
|
||||
Code: "NoSuchLifecycleConfiguration",
|
||||
Description: "The lifecycle configuration does not exist",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrNoSuchBucketSSEConfig: {
|
||||
Code: "ServerSideEncryptionConfigurationNotFoundError",
|
||||
Description: "The server side encryption configuration was not found",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrNoSuchKey: {
|
||||
Code: "NoSuchKey",
|
||||
Description: "The specified key does not exist.",
|
||||
@@ -620,9 +671,14 @@ var errorCodes = errorCodeMap{
|
||||
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
|
||||
// right Description: "Error parsing the X-Amz-Credential parameter; incorrect service \"s4\". This endpoint belongs to \"s3\".".
|
||||
// Need changes to make sure variable messages can be constructed.
|
||||
ErrInvalidService: {
|
||||
Code: "AuthorizationQueryParametersError",
|
||||
Description: "Error parsing the X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",
|
||||
ErrInvalidServiceS3: {
|
||||
Code: "AuthorizationParametersError",
|
||||
Description: "Error parsing the Credential/X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInvalidServiceSTS: {
|
||||
Code: "AuthorizationParametersError",
|
||||
Description: "Error parsing the Credential parameter; incorrect service. This endpoint belongs to \"sts\".",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
|
||||
@@ -720,7 +776,71 @@ var errorCodes = errorCodeMap{
|
||||
Description: "Duration provided in the request is invalid.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
|
||||
ErrInvalidBucketObjectLockConfiguration: {
|
||||
Code: "InvalidRequest",
|
||||
Description: "Bucket is missing ObjectLockConfiguration",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrBucketTaggingNotFound: {
|
||||
Code: "NoSuchTagSet",
|
||||
Description: "The TagSet does not exist",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrObjectLockConfigurationNotFound: {
|
||||
Code: "ObjectLockConfigurationNotFoundError",
|
||||
Description: "Object Lock configuration does not exist for this bucket",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrObjectLockConfigurationNotAllowed: {
|
||||
Code: "InvalidBucketState",
|
||||
Description: "Object Lock configuration cannot be enabled on existing buckets",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
},
|
||||
ErrNoSuchCORSConfiguration: {
|
||||
Code: "NoSuchCORSConfiguration",
|
||||
Description: "The CORS configuration does not exist",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrNoSuchWebsiteConfiguration: {
|
||||
Code: "NoSuchWebsiteConfiguration",
|
||||
Description: "The specified bucket does not have a website configuration",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrReplicationConfigurationNotFoundError: {
|
||||
Code: "ReplicationConfigurationNotFoundError",
|
||||
Description: "The replication configuration was not found",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrNoSuchObjectLockConfiguration: {
|
||||
Code: "NoSuchObjectLockConfiguration",
|
||||
Description: "The specified object does not have a ObjectLock configuration",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrObjectLocked: {
|
||||
Code: "InvalidRequest",
|
||||
Description: "Object is WORM protected and cannot be overwritten",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInvalidRetentionDate: {
|
||||
Code: "InvalidRequest",
|
||||
Description: "Date must be provided in ISO 8601 format",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrPastObjectLockRetainDate: {
|
||||
Code: "InvalidRequest",
|
||||
Description: "the retain until date must be in the future",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrUnknownWORMModeDirective: {
|
||||
Code: "InvalidRequest",
|
||||
Description: "unknown wormMode directive",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrObjectLockInvalidHeaders: {
|
||||
Code: "InvalidRequest",
|
||||
Description: "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
/// Bucket notification related errors.
|
||||
ErrEventNotification: {
|
||||
Code: "InvalidArgument",
|
||||
@@ -787,6 +907,11 @@ var errorCodes = errorCodeMap{
|
||||
Description: "Your metadata headers exceed the maximum allowed metadata size.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInvalidTagDirective: {
|
||||
Code: "InvalidArgument",
|
||||
Description: "Unknown tag directive.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInvalidEncryptionMethod: {
|
||||
Code: "InvalidRequest",
|
||||
Description: "The encryption method specified is not supported",
|
||||
@@ -992,6 +1117,21 @@ var errorCodes = errorCodeMap{
|
||||
Description: "Credentials in config mismatch with server environment variables",
|
||||
HTTPStatusCode: http.StatusServiceUnavailable,
|
||||
},
|
||||
ErrAdminBucketQuotaExceeded: {
|
||||
Code: "XMinioAdminBucketQuotaExceeded",
|
||||
Description: "Bucket quota exceeded",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrAdminNoSuchQuotaConfiguration: {
|
||||
Code: "XMinioAdminNoSuchQuotaConfiguration",
|
||||
Description: "The quota configuration does not exist",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrAdminBucketQuotaDisabled: {
|
||||
Code: "XMinioAdminBucketQuotaDisabled",
|
||||
Description: "Quota specified but disk usage crawl is disabled on MinIO server",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInsecureClientRequest: {
|
||||
Code: "XMinioInsecureClientRequest",
|
||||
Description: "Cannot respond to plain-text request from TLS-encrypted server",
|
||||
@@ -1002,6 +1142,11 @@ var errorCodes = errorCodeMap{
|
||||
Description: "A timeout occurred while trying to lock a resource",
|
||||
HTTPStatusCode: http.StatusRequestTimeout,
|
||||
},
|
||||
ErrOperationMaxedOut: {
|
||||
Code: "XMinioServerTimedOut",
|
||||
Description: "A timeout exceeded while waiting to proceed with the request",
|
||||
HTTPStatusCode: http.StatusRequestTimeout,
|
||||
},
|
||||
ErrUnsupportedMetadata: {
|
||||
Code: "InvalidArgument",
|
||||
Description: "Your metadata headers are not supported.",
|
||||
@@ -1497,6 +1642,16 @@ var errorCodes = errorCodeMap{
|
||||
Description: "User is not allowed to be same as admin access key",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
},
|
||||
ErrAdminAccountNotEligible: {
|
||||
Code: "XMinioInvalidIAMCredentials",
|
||||
Description: "The administrator key is not eligible for this operation",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
},
|
||||
ErrServiceAccountNotFound: {
|
||||
Code: "XMinioInvalidIAMCredentials",
|
||||
Description: "The specified service account is not found",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
ErrPostPolicyConditionInvalidFormat: {
|
||||
Code: "PostPolicyInvalidKeyName",
|
||||
Description: "Invalid according to Policy: Policy Condition failed",
|
||||
@@ -1512,7 +1667,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
||||
if err == nil {
|
||||
return ErrNone
|
||||
}
|
||||
// Verify if the underlying error is signature mismatch.
|
||||
|
||||
switch err {
|
||||
case errInvalidArgument:
|
||||
apiErr = ErrAdminInvalidArgument
|
||||
@@ -1565,10 +1720,20 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
||||
apiErr = ErrKMSNotConfigured
|
||||
case crypto.ErrKMSAuthLogin:
|
||||
apiErr = ErrKMSAuthFailure
|
||||
case errOperationTimedOut, context.Canceled, context.DeadlineExceeded:
|
||||
case context.Canceled, context.DeadlineExceeded:
|
||||
apiErr = ErrOperationTimedOut
|
||||
case errDiskNotFound:
|
||||
apiErr = ErrSlowDown
|
||||
case objectlock.ErrInvalidRetentionDate:
|
||||
apiErr = ErrInvalidRetentionDate
|
||||
case objectlock.ErrPastObjectLockRetainDate:
|
||||
apiErr = ErrPastObjectLockRetainDate
|
||||
case objectlock.ErrUnknownWORMModeDirective:
|
||||
apiErr = ErrUnknownWORMModeDirective
|
||||
case objectlock.ErrObjectLockInvalidHeaders:
|
||||
apiErr = ErrObjectLockInvalidHeaders
|
||||
case objectlock.ErrMalformedXML:
|
||||
apiErr = ErrMalformedXML
|
||||
}
|
||||
|
||||
// Compression errors
|
||||
@@ -1658,7 +1823,17 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
||||
case BucketPolicyNotFound:
|
||||
apiErr = ErrNoSuchBucketPolicy
|
||||
case BucketLifecycleNotFound:
|
||||
apiErr = ErrNoSuchBucketLifecycle
|
||||
apiErr = ErrNoSuchLifecycleConfiguration
|
||||
case BucketSSEConfigNotFound:
|
||||
apiErr = ErrNoSuchBucketSSEConfig
|
||||
case BucketTaggingNotFound:
|
||||
apiErr = ErrBucketTaggingNotFound
|
||||
case BucketObjectLockConfigNotFound:
|
||||
apiErr = ErrObjectLockConfigurationNotFound
|
||||
case BucketQuotaConfigNotFound:
|
||||
apiErr = ErrAdminNoSuchQuotaConfiguration
|
||||
case BucketQuotaExceeded:
|
||||
apiErr = ErrAdminBucketQuotaExceeded
|
||||
case *event.ErrInvalidEventName:
|
||||
apiErr = ErrEventNotification
|
||||
case *event.ErrInvalidARN:
|
||||
@@ -1681,10 +1856,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
||||
apiErr = ErrOverlappingFilterNotification
|
||||
case *event.ErrUnsupportedConfiguration:
|
||||
apiErr = ErrUnsupportedNotification
|
||||
case OperationTimedOut:
|
||||
apiErr = ErrOperationTimedOut
|
||||
case BackendDown:
|
||||
apiErr = ErrBackendDown
|
||||
case crypto.Error:
|
||||
apiErr = ErrObjectTampered
|
||||
case ObjectNameTooLong:
|
||||
apiErr = ErrKeyTooLongError
|
||||
default:
|
||||
@@ -1729,9 +1904,41 @@ func toAPIError(ctx context.Context, err error) APIError {
|
||||
// their internal error types. This code is only
|
||||
// useful with gateway implementations.
|
||||
switch e := err.(type) {
|
||||
case *xml.SyntaxError:
|
||||
apiErr = APIError{
|
||||
Code: "MalformedXML",
|
||||
Description: fmt.Sprintf("%s (%s)", errorCodes[ErrMalformedXML].Description,
|
||||
e.Error()),
|
||||
HTTPStatusCode: errorCodes[ErrMalformedXML].HTTPStatusCode,
|
||||
}
|
||||
case url.EscapeError:
|
||||
apiErr = APIError{
|
||||
Code: "XMinioInvalidObjectName",
|
||||
Description: fmt.Sprintf("%s (%s)", errorCodes[ErrInvalidObjectName].Description,
|
||||
e.Error()),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
case lifecycle.Error:
|
||||
apiErr = APIError{
|
||||
Code: "InvalidRequest",
|
||||
Description: e.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
case tags.Error:
|
||||
apiErr = APIError{
|
||||
Code: e.Code(),
|
||||
Description: e.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
case policy.Error:
|
||||
apiErr = APIError{
|
||||
Code: "MalformedPolicy",
|
||||
Description: e.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
case crypto.Error:
|
||||
apiErr = APIError{
|
||||
Code: "XKMSInternalError",
|
||||
Code: "XMinIOEncryptionError",
|
||||
Description: e.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
@@ -1753,17 +1960,11 @@ func toAPIError(ctx context.Context, err error) APIError {
|
||||
apiErr.Code = e.Errors[0].Reason
|
||||
|
||||
}
|
||||
case storage.AzureStorageServiceError:
|
||||
case azblob.StorageError:
|
||||
apiErr = APIError{
|
||||
Code: e.Code,
|
||||
Description: e.Message,
|
||||
HTTPStatusCode: e.StatusCode,
|
||||
}
|
||||
case oss.ServiceError:
|
||||
apiErr = APIError{
|
||||
Code: e.Code,
|
||||
Description: e.Message,
|
||||
HTTPStatusCode: e.StatusCode,
|
||||
Code: string(e.ServiceCode()),
|
||||
Description: e.Error(),
|
||||
HTTPStatusCode: e.Response().StatusCode,
|
||||
}
|
||||
// Add more Gateway SDKs here if any in future.
|
||||
}
|
||||
@@ -1790,7 +1991,7 @@ func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID,
|
||||
BucketName: reqInfo.BucketName,
|
||||
Key: reqInfo.ObjectName,
|
||||
Resource: resource,
|
||||
Region: globalServerConfig.GetRegion(),
|
||||
Region: globalServerRegion,
|
||||
RequestID: requestID,
|
||||
HostID: hostID,
|
||||
}
|
||||
|
||||
@@ -19,6 +19,8 @@ package cmd
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
@@ -65,6 +67,11 @@ var toAPIErrorTests = []struct {
|
||||
}
|
||||
|
||||
func TestAPIErrCode(t *testing.T) {
|
||||
disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
|
||||
defer os.RemoveAll(disk)
|
||||
|
||||
initFSObjects(disk, t)
|
||||
|
||||
ctx := context.Background()
|
||||
for i, testCase := range toAPIErrorTests {
|
||||
errCode := toAPIErrorCode(ctx, testCase.err)
|
||||
|
||||
@@ -22,7 +22,9 @@ import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
@@ -40,7 +42,7 @@ func setCommonHeaders(w http.ResponseWriter) {
|
||||
w.Header().Set(xhttp.ServerInfo, "MinIO/"+ReleaseTag)
|
||||
// Set `x-amz-bucket-region` only if region is set on the server
|
||||
// by default minio uses an empty region.
|
||||
if region := globalServerConfig.GetRegion(); region != "" {
|
||||
if region := globalServerRegion; region != "" {
|
||||
w.Header().Set(xhttp.AmzBucketRegion, region)
|
||||
}
|
||||
w.Header().Set(xhttp.AcceptRanges, "bytes")
|
||||
@@ -80,6 +82,10 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
|
||||
w.Header()[xhttp.ETag] = []string{"\"" + objInfo.ETag + "\""}
|
||||
}
|
||||
|
||||
if strings.Contains(objInfo.ETag, "-") && len(objInfo.Parts) > 0 {
|
||||
w.Header()[xhttp.AmzMpPartsCount] = []string{strconv.Itoa(len(objInfo.Parts))}
|
||||
}
|
||||
|
||||
if objInfo.ContentType != "" {
|
||||
w.Header().Set(xhttp.ContentType, objInfo.ContentType)
|
||||
}
|
||||
@@ -92,30 +98,42 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
|
||||
w.Header().Set(xhttp.Expires, objInfo.Expires.UTC().Format(http.TimeFormat))
|
||||
}
|
||||
|
||||
if globalCacheConfig.Enabled {
|
||||
w.Header().Set(xhttp.XCache, objInfo.CacheStatus.String())
|
||||
w.Header().Set(xhttp.XCacheLookup, objInfo.CacheLookupStatus.String())
|
||||
}
|
||||
|
||||
// Set tag count if object has tags
|
||||
tags, _ := url.ParseQuery(objInfo.UserTags)
|
||||
tagCount := len(tags)
|
||||
if tagCount > 0 {
|
||||
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(tagCount)}
|
||||
}
|
||||
|
||||
// Set all other user defined metadata.
|
||||
for k, v := range objInfo.UserDefined {
|
||||
if hasPrefix(k, ReservedMetadataPrefix) {
|
||||
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
|
||||
// Do not need to send any internal metadata
|
||||
// values to client.
|
||||
continue
|
||||
}
|
||||
w.Header().Set(k, v)
|
||||
var isSet bool
|
||||
for _, userMetadataPrefix := range userMetadataKeyPrefixes {
|
||||
if !strings.HasPrefix(k, userMetadataPrefix) {
|
||||
continue
|
||||
}
|
||||
w.Header()[strings.ToLower(k)] = []string{v}
|
||||
isSet = true
|
||||
break
|
||||
}
|
||||
if !isSet {
|
||||
w.Header().Set(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
var totalObjectSize int64
|
||||
switch {
|
||||
case crypto.IsEncrypted(objInfo.UserDefined):
|
||||
totalObjectSize, err = objInfo.DecryptedSize()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case objInfo.IsCompressed():
|
||||
totalObjectSize = objInfo.GetActualSize()
|
||||
if totalObjectSize < 0 {
|
||||
return errInvalidDecompressedSize
|
||||
}
|
||||
default:
|
||||
totalObjectSize = objInfo.Size
|
||||
totalObjectSize, err := objInfo.GetActualSize()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// for providing ranged content
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"net/url"
|
||||
"strconv"
|
||||
)
|
||||
@@ -86,11 +87,19 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
|
||||
}
|
||||
|
||||
prefix = values.Get("prefix")
|
||||
token = values.Get("continuation-token")
|
||||
startAfter = values.Get("start-after")
|
||||
delimiter = values.Get("delimiter")
|
||||
fetchOwner = values.Get("fetch-owner") == "true"
|
||||
encodingType = values.Get("encoding-type")
|
||||
|
||||
if token = values.Get("continuation-token"); token != "" {
|
||||
decodedToken, err := base64.StdEncoding.DecodeString(token)
|
||||
if err != nil {
|
||||
errCode = ErrIncorrectContinuationToken
|
||||
return
|
||||
}
|
||||
token = string(decodedToken)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -34,7 +34,7 @@ func TestListObjectsV2Resources(t *testing.T) {
|
||||
{
|
||||
values: url.Values{
|
||||
"prefix": []string{"photos/"},
|
||||
"continuation-token": []string{"token"},
|
||||
"continuation-token": []string{"dG9rZW4="},
|
||||
"start-after": []string{"start-after"},
|
||||
"delimiter": []string{SlashSeparator},
|
||||
"fetch-owner": []string{"true"},
|
||||
@@ -53,7 +53,7 @@ func TestListObjectsV2Resources(t *testing.T) {
|
||||
{
|
||||
values: url.Values{
|
||||
"prefix": []string{"photos/"},
|
||||
"continuation-token": []string{"token"},
|
||||
"continuation-token": []string{"dG9rZW4="},
|
||||
"start-after": []string{"start-after"},
|
||||
"delimiter": []string{SlashSeparator},
|
||||
"fetch-owner": []string{"true"},
|
||||
@@ -64,7 +64,7 @@ func TestListObjectsV2Resources(t *testing.T) {
|
||||
startAfter: "start-after",
|
||||
delimiter: SlashSeparator,
|
||||
fetchOwner: true,
|
||||
maxKeys: 1000,
|
||||
maxKeys: maxObjectList,
|
||||
encodingType: "gzip",
|
||||
errCode: ErrNone,
|
||||
},
|
||||
@@ -150,7 +150,7 @@ func TestListObjectsV1Resources(t *testing.T) {
|
||||
prefix: "photos/",
|
||||
marker: "test",
|
||||
delimiter: SlashSeparator,
|
||||
maxKeys: 1000,
|
||||
maxKeys: maxObjectList,
|
||||
encodingType: "gzip",
|
||||
},
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -32,10 +33,12 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
timeFormatAMZLong = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
|
||||
maxObjectList = 1000 // Limit number of objects in a listObjectsResponse.
|
||||
maxUploadsList = 1000 // Limit number of uploads in a listUploadsResponse.
|
||||
maxPartsList = 1000 // Limit number of parts in a listPartsResponse.
|
||||
// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
|
||||
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
|
||||
maxObjectList = 50000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
|
||||
maxDeleteList = 10000 // Limit number of objects deleted in a delete call.
|
||||
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
|
||||
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
|
||||
)
|
||||
|
||||
// LocationResponse - format for location response.
|
||||
@@ -238,6 +241,37 @@ type ObjectVersion struct {
|
||||
IsLatest bool
|
||||
}
|
||||
|
||||
// StringMap is a map[string]string.
|
||||
type StringMap map[string]string
|
||||
|
||||
// MarshalXML - StringMap marshals into XML.
|
||||
func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
|
||||
tokens := []xml.Token{start}
|
||||
|
||||
for key, value := range s {
|
||||
t := xml.StartElement{}
|
||||
t.Name = xml.Name{
|
||||
Space: "",
|
||||
Local: key,
|
||||
}
|
||||
tokens = append(tokens, t, xml.CharData(value), xml.EndElement{Name: t.Name})
|
||||
}
|
||||
|
||||
tokens = append(tokens, xml.EndElement{
|
||||
Name: start.Name,
|
||||
})
|
||||
|
||||
for _, t := range tokens {
|
||||
if err := e.EncodeToken(t); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// flush to ensure tokens are written
|
||||
return e.Flush()
|
||||
}
|
||||
|
||||
// Object container for object metadata
|
||||
type Object struct {
|
||||
Key string
|
||||
@@ -250,6 +284,9 @@ type Object struct {
|
||||
|
||||
// The class of storage used to store the object.
|
||||
StorageClass string
|
||||
|
||||
// UserMetadata user-defined metadata
|
||||
UserMetadata StringMap `xml:"UserMetadata,omitempty"`
|
||||
}
|
||||
|
||||
// CopyObjectResponse container returns ETag and LastModified of the successfully copied object
|
||||
@@ -365,7 +402,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
|
||||
for _, bucket := range buckets {
|
||||
var listbucket = Bucket{}
|
||||
listbucket.Name = bucket.Name
|
||||
listbucket.CreationDate = bucket.Created.UTC().Format(timeFormatAMZLong)
|
||||
listbucket.CreationDate = bucket.Created.UTC().Format(iso8601TimeFormat)
|
||||
listbuckets = append(listbuckets, listbucket)
|
||||
}
|
||||
|
||||
@@ -389,20 +426,25 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp
|
||||
continue
|
||||
}
|
||||
content.Key = s3EncodeName(object.Name, encodingType)
|
||||
content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
|
||||
content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
|
||||
if object.ETag != "" {
|
||||
content.ETag = "\"" + object.ETag + "\""
|
||||
}
|
||||
content.Size = object.Size
|
||||
content.StorageClass = object.StorageClass
|
||||
if object.StorageClass != "" {
|
||||
content.StorageClass = object.StorageClass
|
||||
} else {
|
||||
content.StorageClass = globalMinioDefaultStorageClass
|
||||
}
|
||||
|
||||
content.Owner = owner
|
||||
content.VersionID = "null"
|
||||
content.IsLatest = true
|
||||
versions = append(versions, content)
|
||||
}
|
||||
|
||||
data.Name = bucket
|
||||
data.Versions = versions
|
||||
|
||||
data.EncodingType = encodingType
|
||||
data.Prefix = s3EncodeName(prefix, encodingType)
|
||||
data.KeyMarker = s3EncodeName(marker, encodingType)
|
||||
@@ -435,12 +477,16 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
|
||||
continue
|
||||
}
|
||||
content.Key = s3EncodeName(object.Name, encodingType)
|
||||
content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
|
||||
content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
|
||||
if object.ETag != "" {
|
||||
content.ETag = "\"" + object.ETag + "\""
|
||||
}
|
||||
content.Size = object.Size
|
||||
content.StorageClass = object.StorageClass
|
||||
if object.StorageClass != "" {
|
||||
content.StorageClass = object.StorageClass
|
||||
} else {
|
||||
content.StorageClass = globalMinioDefaultStorageClass
|
||||
}
|
||||
content.Owner = owner
|
||||
contents = append(contents, content)
|
||||
}
|
||||
@@ -465,7 +511,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
|
||||
}
|
||||
|
||||
// generates an ListObjectsV2 response for the said bucket with other enumerated options.
|
||||
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string) ListObjectsV2Response {
|
||||
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
|
||||
var contents []Object
|
||||
var commonPrefixes []CommonPrefix
|
||||
var owner = Owner{}
|
||||
@@ -481,13 +527,28 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
|
||||
continue
|
||||
}
|
||||
content.Key = s3EncodeName(object.Name, encodingType)
|
||||
content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
|
||||
content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
|
||||
if object.ETag != "" {
|
||||
content.ETag = "\"" + object.ETag + "\""
|
||||
}
|
||||
content.Size = object.Size
|
||||
content.StorageClass = object.StorageClass
|
||||
if object.StorageClass != "" {
|
||||
content.StorageClass = object.StorageClass
|
||||
} else {
|
||||
content.StorageClass = globalMinioDefaultStorageClass
|
||||
}
|
||||
content.Owner = owner
|
||||
if metadata {
|
||||
content.UserMetadata = make(StringMap)
|
||||
for k, v := range CleanMinioInternalMetadataKeys(object.UserDefined) {
|
||||
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
|
||||
// Do not need to send any internal metadata
|
||||
// values to client.
|
||||
continue
|
||||
}
|
||||
content.UserMetadata[k] = v
|
||||
}
|
||||
}
|
||||
contents = append(contents, content)
|
||||
}
|
||||
data.Name = bucket
|
||||
@@ -498,8 +559,8 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
|
||||
data.Delimiter = s3EncodeName(delimiter, encodingType)
|
||||
data.Prefix = s3EncodeName(prefix, encodingType)
|
||||
data.MaxKeys = maxKeys
|
||||
data.ContinuationToken = s3EncodeName(token, encodingType)
|
||||
data.NextContinuationToken = s3EncodeName(nextToken, encodingType)
|
||||
data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token))
|
||||
data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken))
|
||||
data.IsTruncated = isTruncated
|
||||
for _, prefix := range prefixes {
|
||||
var prefixItem = CommonPrefix{}
|
||||
@@ -515,7 +576,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
|
||||
func generateCopyObjectResponse(etag string, lastModified time.Time) CopyObjectResponse {
|
||||
return CopyObjectResponse{
|
||||
ETag: "\"" + etag + "\"",
|
||||
LastModified: lastModified.UTC().Format(timeFormatAMZLong),
|
||||
LastModified: lastModified.UTC().Format(iso8601TimeFormat),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -523,7 +584,7 @@ func generateCopyObjectResponse(etag string, lastModified time.Time) CopyObjectR
|
||||
func generateCopyObjectPartResponse(etag string, lastModified time.Time) CopyObjectPartResponse {
|
||||
return CopyObjectPartResponse{
|
||||
ETag: "\"" + etag + "\"",
|
||||
LastModified: lastModified.UTC().Format(timeFormatAMZLong),
|
||||
LastModified: lastModified.UTC().Format(iso8601TimeFormat),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -542,7 +603,8 @@ func generateCompleteMultpartUploadResponse(bucket, key, location, etag string)
|
||||
Location: location,
|
||||
Bucket: bucket,
|
||||
Key: key,
|
||||
ETag: etag,
|
||||
// AWS S3 quotes the ETag in XML, make sure we are compatible here.
|
||||
ETag: "\"" + etag + "\"",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -567,7 +629,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
|
||||
newPart.PartNumber = part.PartNumber
|
||||
newPart.ETag = "\"" + part.ETag + "\""
|
||||
newPart.Size = part.Size
|
||||
newPart.LastModified = part.LastModified.UTC().Format(timeFormatAMZLong)
|
||||
newPart.LastModified = part.LastModified.UTC().Format(iso8601TimeFormat)
|
||||
listPartsResponse.Parts[index] = newPart
|
||||
}
|
||||
return listPartsResponse
|
||||
@@ -597,7 +659,7 @@ func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMult
|
||||
newUpload := Upload{}
|
||||
newUpload.UploadID = upload.UploadID
|
||||
newUpload.Key = s3EncodeName(upload.Object, encodingType)
|
||||
newUpload.Initiated = upload.Initiated.UTC().Format(timeFormatAMZLong)
|
||||
newUpload.Initiated = upload.Initiated.UTC().Format(iso8601TimeFormat)
|
||||
listMultipartUploadsResponse.Uploads[index] = newUpload
|
||||
}
|
||||
return listMultipartUploadsResponse
|
||||
@@ -675,7 +737,7 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
|
||||
case "AccessDenied":
|
||||
// The request is from browser and also if browser
|
||||
// is enabled we need to redirect.
|
||||
if browser && globalIsBrowserEnabled {
|
||||
if browser && globalBrowserEnabled {
|
||||
w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
|
||||
w.WriteHeader(http.StatusTemporaryRedirect)
|
||||
return
|
||||
@@ -693,6 +755,11 @@ func writeErrorResponseHeadersOnly(w http.ResponseWriter, err APIError) {
|
||||
writeResponse(w, err.HTTPStatusCode, nil, mimeNone)
|
||||
}
|
||||
|
||||
func writeErrorResponseString(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
|
||||
// Generate string error response.
|
||||
writeResponse(w, err.HTTPStatusCode, []byte(err.Description), mimeNone)
|
||||
}
|
||||
|
||||
// writeErrorResponseJSON - writes error response in JSON format;
|
||||
// useful for admin APIs.
|
||||
func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
|
||||
@@ -735,7 +802,7 @@ func writeCustomErrorResponseXML(ctx context.Context, w http.ResponseWriter, err
|
||||
case "AccessDenied":
|
||||
// The request is from browser and also if browser
|
||||
// is enabled we need to redirect.
|
||||
if browser && globalIsBrowserEnabled {
|
||||
if browser && globalBrowserEnabled {
|
||||
w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
|
||||
w.WriteHeader(http.StatusTemporaryRedirect)
|
||||
return
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2016 MinIO, Inc.
|
||||
* MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -23,6 +23,37 @@ import (
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
)
|
||||
|
||||
func newHTTPServerFn() *xhttp.Server {
|
||||
globalObjLayerMutex.Lock()
|
||||
defer globalObjLayerMutex.Unlock()
|
||||
return globalHTTPServer
|
||||
}
|
||||
|
||||
func newObjectLayerWithoutSafeModeFn() ObjectLayer {
|
||||
globalObjLayerMutex.Lock()
|
||||
defer globalObjLayerMutex.Unlock()
|
||||
return globalObjectAPI
|
||||
}
|
||||
|
||||
func newObjectLayerFn() ObjectLayer {
|
||||
globalObjLayerMutex.Lock()
|
||||
defer globalObjLayerMutex.Unlock()
|
||||
if globalSafeMode {
|
||||
return nil
|
||||
}
|
||||
return globalObjectAPI
|
||||
}
|
||||
|
||||
func newCachedObjectLayerFn() CacheObjectLayer {
|
||||
globalObjLayerMutex.Lock()
|
||||
defer globalObjLayerMutex.Unlock()
|
||||
|
||||
if globalSafeMode {
|
||||
return nil
|
||||
}
|
||||
return globalCacheObjectAPI
|
||||
}
|
||||
|
||||
// objectAPIHandler implements and provides http handlers for S3 API.
|
||||
type objectAPIHandlers struct {
|
||||
ObjectAPI func() ObjectLayer
|
||||
@@ -38,7 +69,7 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
|
||||
// Initialize API.
|
||||
api := objectAPIHandlers{
|
||||
ObjectAPI: newObjectLayerFn,
|
||||
CacheAPI: newCacheObjectsFn,
|
||||
CacheAPI: newCachedObjectLayerFn,
|
||||
EncryptionEnabled: func() bool {
|
||||
return encryptionEnabled
|
||||
},
|
||||
@@ -59,108 +90,208 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
|
||||
for _, bucket := range routers {
|
||||
// Object operations
|
||||
// HeadObject
|
||||
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.HeadObjectHandler))
|
||||
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler))))
|
||||
// CopyObjectPart
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(httpTraceAll(api.CopyObjectPartHandler)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
|
||||
// PutObjectPart
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.PutObjectPartHandler)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
|
||||
// ListObjectParts
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.ListObjectPartsHandler)).Queries("uploadId", "{uploadId:.*}")
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("listobjectparts", httpTraceAll(api.ListObjectPartsHandler)))).Queries("uploadId", "{uploadId:.*}")
|
||||
// CompleteMultipartUpload
|
||||
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.CompleteMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}")
|
||||
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("completemutipartupload", httpTraceAll(api.CompleteMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
|
||||
// NewMultipartUpload
|
||||
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.NewMultipartUploadHandler)).Queries("uploads", "")
|
||||
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("newmultipartupload", httpTraceAll(api.NewMultipartUploadHandler)))).Queries("uploads", "")
|
||||
// AbortMultipartUpload
|
||||
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.AbortMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}")
|
||||
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("abortmultipartupload", httpTraceAll(api.AbortMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
|
||||
// GetObjectACL - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectACLHandler)).Queries("acl", "")
|
||||
// GetObjectTagging - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectTaggingHandler)).Queries("tagging", "")
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("getobjectacl", httpTraceHdrs(api.GetObjectACLHandler)))).Queries("acl", "")
|
||||
// PutObjectACL - this is a dummy call.
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobjectacl", httpTraceHdrs(api.PutObjectACLHandler)))).Queries("acl", "")
|
||||
// GetObjectTagging
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler)))).Queries("tagging", "")
|
||||
// PutObjectTagging
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobjecttagging", httpTraceHdrs(api.PutObjectTaggingHandler)))).Queries("tagging", "")
|
||||
// DeleteObjectTagging
|
||||
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("deleteobjecttagging", httpTraceHdrs(api.DeleteObjectTaggingHandler)))).Queries("tagging", "")
|
||||
// SelectObjectContent
|
||||
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.SelectObjectContentHandler)).Queries("select", "").Queries("select-type", "2")
|
||||
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler)))).Queries("select", "").Queries("select-type", "2")
|
||||
// GetObjectRetention
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("getobjectretention", httpTraceAll(api.GetObjectRetentionHandler)))).Queries("retention", "")
|
||||
// GetObjectLegalHold
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("getobjectlegalhold", httpTraceAll(api.GetObjectLegalHoldHandler)))).Queries("legal-hold", "")
|
||||
// GetObject
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectHandler))
|
||||
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler))))
|
||||
// CopyObject
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(httpTraceAll(api.CopyObjectHandler))
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
|
||||
// PutObjectRetention
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
|
||||
// PutObjectLegalHold
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobjectlegalhold", httpTraceAll(api.PutObjectLegalHoldHandler)))).Queries("legal-hold", "")
|
||||
|
||||
// PutObject
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.PutObjectHandler))
|
||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler))))
|
||||
// DeleteObject
|
||||
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(httpTraceAll(api.DeleteObjectHandler))
|
||||
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler))))
|
||||
|
||||
/// Bucket operations
|
||||
// GetBucketLocation
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketLocationHandler)).Queries("location", "")
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketlocation", httpTraceAll(api.GetBucketLocationHandler)))).Queries("location", "")
|
||||
// GetBucketPolicy
|
||||
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketPolicyHandler)).Queries("policy", "")
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler)))).Queries("policy", "")
|
||||
// GetBucketLifecycle
|
||||
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketLifecycleHandler)).Queries("lifecycle", "")
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
// GetBucketEncryption
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketencryption", httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")
|
||||
|
||||
// Dummy Bucket Calls
|
||||
// GetBucketACL -- this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketACLHandler)).Queries("acl", "")
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketacl", httpTraceAll(api.GetBucketACLHandler)))).Queries("acl", "")
|
||||
// PutBucketACL -- this is a dummy call.
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketacl", httpTraceAll(api.PutBucketACLHandler)))).Queries("acl", "")
|
||||
// GetBucketCors - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketCorsHandler)).Queries("cors", "")
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketcors", httpTraceAll(api.GetBucketCorsHandler)))).Queries("cors", "")
|
||||
// GetBucketWebsiteHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketWebsiteHandler)).Queries("website", "")
|
||||
// GetBucketVersioningHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketVersioningHandler)).Queries("versioning", "")
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketwebsite", httpTraceAll(api.GetBucketWebsiteHandler)))).Queries("website", "")
|
||||
// GetBucketAccelerateHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketAccelerateHandler)).Queries("accelerate", "")
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketaccelerate", httpTraceAll(api.GetBucketAccelerateHandler)))).Queries("accelerate", "")
|
||||
// GetBucketRequestPaymentHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketRequestPaymentHandler)).Queries("requestPayment", "")
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketrequestpayment", httpTraceAll(api.GetBucketRequestPaymentHandler)))).Queries("requestPayment", "")
|
||||
// GetBucketLoggingHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketLoggingHandler)).Queries("logging", "")
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketlogging", httpTraceAll(api.GetBucketLoggingHandler)))).Queries("logging", "")
|
||||
// GetBucketLifecycleHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketLifecycleHandler)).Queries("lifecycle", "")
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
// GetBucketReplicationHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketReplicationHandler)).Queries("replication", "")
|
||||
// GetBucketTaggingHandler - this is a dummy call.
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketTaggingHandler)).Queries("tagging", "")
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketreplication", httpTraceAll(api.GetBucketReplicationHandler)))).Queries("replication", "")
|
||||
// GetBucketTaggingHandler
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
|
||||
//DeleteBucketWebsiteHandler
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(httpTraceAll(api.DeleteBucketWebsiteHandler)).Queries("website", "")
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketwebsite", httpTraceAll(api.DeleteBucketWebsiteHandler)))).Queries("website", "")
|
||||
// DeleteBucketTaggingHandler
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(httpTraceAll(api.DeleteBucketTaggingHandler)).Queries("tagging", "")
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")
|
||||
|
||||
// GetBucketObjectLockConfig
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
|
||||
// GetBucketVersioning
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
|
||||
// GetBucketNotification
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketNotificationHandler)).Queries("notification", "")
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
|
||||
// ListenBucketNotification
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.ListenBucketNotificationHandler)).Queries("events", "{events:.*}")
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listenbucketnotification", httpTraceAll(api.ListenBucketNotificationHandler))).Queries("events", "{events:.*}")
|
||||
// ListMultipartUploads
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.ListMultipartUploadsHandler)).Queries("uploads", "")
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")
|
||||
// ListObjectsV2M
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("listobjectsv2M", httpTraceAll(api.ListObjectsV2MHandler)))).Queries("list-type", "2", "metadata", "true")
|
||||
// ListObjectsV2
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.ListObjectsV2Handler)).Queries("list-type", "2")
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
|
||||
// ListBucketVersions
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.ListBucketObjectVersionsHandler)).Queries("versions", "")
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("listbucketversions", httpTraceAll(api.ListBucketObjectVersionsHandler)))).Queries("versions", "")
|
||||
// ListObjectsV1 (Legacy)
|
||||
bucket.Methods("GET").HandlerFunc(httpTraceAll(api.ListObjectsV1Handler))
|
||||
bucket.Methods(http.MethodGet).HandlerFunc(
|
||||
maxClients(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler))))
|
||||
// PutBucketLifecycle
|
||||
bucket.Methods("PUT").HandlerFunc(httpTraceAll(api.PutBucketLifecycleHandler)).Queries("lifecycle", "")
|
||||
// PutBucketPolicy
|
||||
bucket.Methods("PUT").HandlerFunc(httpTraceAll(api.PutBucketPolicyHandler)).Queries("policy", "")
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
// PutBucketEncryption
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketencryption", httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")
|
||||
|
||||
// PutBucketPolicy
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler)))).Queries("policy", "")
|
||||
|
||||
// PutBucketObjectLockConfig
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
|
||||
// PutBucketTaggingHandler
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbuckettagging", httpTraceAll(api.PutBucketTaggingHandler)))).Queries("tagging", "")
|
||||
// PutBucketVersioning
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
|
||||
// PutBucketNotification
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(httpTraceAll(api.PutBucketNotificationHandler)).Queries("notification", "")
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler)))).Queries("notification", "")
|
||||
// PutBucket
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(httpTraceAll(api.PutBucketHandler))
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucket", httpTraceAll(api.PutBucketHandler))))
|
||||
// HeadBucket
|
||||
bucket.Methods(http.MethodHead).HandlerFunc(httpTraceAll(api.HeadBucketHandler))
|
||||
bucket.Methods(http.MethodHead).HandlerFunc(
|
||||
maxClients(collectAPIStats("headbucket", httpTraceAll(api.HeadBucketHandler))))
|
||||
// PostPolicy
|
||||
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(httpTraceHdrs(api.PostPolicyBucketHandler))
|
||||
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(
|
||||
maxClients(collectAPIStats("postpolicybucket", httpTraceHdrs(api.PostPolicyBucketHandler))))
|
||||
// DeleteMultipleObjects
|
||||
bucket.Methods(http.MethodPost).HandlerFunc(httpTraceAll(api.DeleteMultipleObjectsHandler)).Queries("delete", "")
|
||||
bucket.Methods(http.MethodPost).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler)))).Queries("delete", "")
|
||||
// DeleteBucketPolicy
|
||||
bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketPolicyHandler)).Queries("policy", "")
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
|
||||
// DeleteBucketLifecycle
|
||||
bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketLifecycleHandler)).Queries("lifecycle", "")
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
// DeleteBucketEncryption
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketencryption", httpTraceAll(api.DeleteBucketEncryptionHandler)))).Queries("encryption", "")
|
||||
// DeleteBucket
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(httpTraceAll(api.DeleteBucketHandler))
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler))))
|
||||
}
|
||||
|
||||
/// Root operation
|
||||
|
||||
// ListBuckets
|
||||
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(httpTraceAll(api.ListBucketsHandler))
|
||||
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
|
||||
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
|
||||
|
||||
// S3 browser with signature v4 adds '//' for ListBuckets request, so rather
|
||||
// than failing with UnknownAPIRequest we simply handle it for now.
|
||||
apiRouter.Methods(http.MethodGet).Path(SlashSeparator + SlashSeparator).HandlerFunc(
|
||||
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
|
||||
|
||||
// If none of the routes match add default error handler routes
|
||||
apiRouter.NotFoundHandler = http.HandlerFunc(collectAPIStats("notfound", httpTraceAll(errorResponseHandler)))
|
||||
apiRouter.MethodNotAllowedHandler = http.HandlerFunc(collectAPIStats("methodnotallowed", httpTraceAll(errorResponseHandler)))
|
||||
|
||||
// If none of the routes match.
|
||||
apiRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler))
|
||||
}
|
||||
|
||||
@@ -26,15 +26,18 @@ import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
jwtgo "github.com/dgrijalva/jwt-go"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
xjwt "github.com/minio/minio/cmd/jwt"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
"github.com/minio/minio/pkg/policy"
|
||||
)
|
||||
|
||||
// Verify if request has JWT.
|
||||
@@ -118,22 +121,16 @@ func getRequestAuthType(r *http.Request) authType {
|
||||
return authTypeUnknown
|
||||
}
|
||||
|
||||
// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
|
||||
// It does not accept presigned or JWT or anonymous requests.
|
||||
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, region string) APIErrorCode {
|
||||
func validateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, map[string]interface{}, bool, APIErrorCode) {
|
||||
var cred auth.Credentials
|
||||
var owner bool
|
||||
s3Err := ErrAccessDenied
|
||||
if _, ok := r.Header[xhttp.AmzContentSha256]; ok &&
|
||||
getRequestAuthType(r) == authTypeSigned && !skipContentSha256Cksum(r) {
|
||||
// We only support admin credentials to access admin APIs.
|
||||
|
||||
var owner bool
|
||||
_, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
|
||||
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
|
||||
if s3Err != ErrNone {
|
||||
return s3Err
|
||||
}
|
||||
|
||||
if !owner {
|
||||
return ErrAccessDenied
|
||||
return cred, nil, owner, s3Err
|
||||
}
|
||||
|
||||
// we only support V4 (no presign) with auth body
|
||||
@@ -142,9 +139,37 @@ func checkAdminRequestAuthType(ctx context.Context, r *http.Request, region stri
|
||||
if s3Err != ErrNone {
|
||||
reqInfo := (&logger.ReqInfo{}).AppendTags("requestHeaders", dumpRequest(r))
|
||||
ctx := logger.SetReqInfo(ctx, reqInfo)
|
||||
logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description))
|
||||
logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description), logger.Application)
|
||||
return cred, nil, owner, s3Err
|
||||
}
|
||||
return s3Err
|
||||
|
||||
claims, s3Err := checkClaimsFromToken(r, cred)
|
||||
if s3Err != ErrNone {
|
||||
return cred, nil, owner, s3Err
|
||||
}
|
||||
|
||||
return cred, claims, owner, ErrNone
|
||||
}
|
||||
|
||||
// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
|
||||
// It does not accept presigned or JWT or anonymous requests.
|
||||
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
|
||||
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, region)
|
||||
if s3Err != ErrNone {
|
||||
return cred, s3Err
|
||||
}
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: iampolicy.Action(action),
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
|
||||
IsOwner: owner,
|
||||
Claims: claims,
|
||||
}) {
|
||||
// Request is allowed return the appropriate access key.
|
||||
return cred, ErrNone
|
||||
}
|
||||
|
||||
return cred, ErrAccessDenied
|
||||
}
|
||||
|
||||
// Fetch the security token set by the client.
|
||||
@@ -159,18 +184,18 @@ func getSessionToken(r *http.Request) (token string) {
|
||||
// Fetch claims in the security token returned by the client, doesn't return
|
||||
// errors - upon errors the returned claims map will be empty.
|
||||
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
|
||||
claims, _ := getClaimsFromToken(r)
|
||||
claims, _ := getClaimsFromToken(r, getSessionToken(r))
|
||||
return claims
|
||||
}
|
||||
|
||||
// Fetch claims in the security token returned by the client.
|
||||
func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
|
||||
claims := make(map[string]interface{})
|
||||
token := getSessionToken(r)
|
||||
func getClaimsFromToken(r *http.Request, token string) (map[string]interface{}, error) {
|
||||
claims := xjwt.NewMapClaims()
|
||||
if token == "" {
|
||||
return claims, nil
|
||||
return claims.Map(), nil
|
||||
}
|
||||
stsTokenCallback := func(jwtToken *jwtgo.Token) (interface{}, error) {
|
||||
|
||||
stsTokenCallback := func(claims *xjwt.MapClaims) ([]byte, error) {
|
||||
// JWT token for x-amz-security-token is signed with admin
|
||||
// secret key, temporary credentials become invalid if
|
||||
// server admin credentials change. This is done to ensure
|
||||
@@ -179,68 +204,45 @@ func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
|
||||
// hijacking the policies. We need to make sure that this is
|
||||
// based an admin credential such that token cannot be decoded
|
||||
// on the client side and is treated like an opaque value.
|
||||
return []byte(globalServerConfig.GetCredential().SecretKey), nil
|
||||
return []byte(globalActiveCred.SecretKey), nil
|
||||
}
|
||||
p := &jwtgo.Parser{
|
||||
ValidMethods: []string{
|
||||
jwtgo.SigningMethodHS256.Alg(),
|
||||
jwtgo.SigningMethodHS512.Alg(),
|
||||
},
|
||||
}
|
||||
jtoken, err := p.ParseWithClaims(token, jwtgo.MapClaims(claims), stsTokenCallback)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !jtoken.Valid {
|
||||
|
||||
if err := xjwt.ParseWithClaims(token, claims, stsTokenCallback); err != nil {
|
||||
return nil, errAuthentication
|
||||
}
|
||||
v, ok := claims["accessKey"]
|
||||
if !ok {
|
||||
return nil, errInvalidAccessKeyID
|
||||
}
|
||||
if _, ok = v.(string); !ok {
|
||||
return nil, errInvalidAccessKeyID
|
||||
}
|
||||
|
||||
if globalPolicyOPA == nil {
|
||||
// If OPA is not set and if ldap claim key is set,
|
||||
// allow the claim.
|
||||
if _, ok := claims[ldapUser]; ok {
|
||||
return claims, nil
|
||||
// If OPA is not set and if ldap claim key is set, allow the claim.
|
||||
if _, ok := claims.Lookup(ldapUser); ok {
|
||||
return claims.Map(), nil
|
||||
}
|
||||
|
||||
// If OPA is not set, session token should
|
||||
// have a policy and its mandatory, reject
|
||||
// requests without policy claim.
|
||||
p, pok := claims[iampolicy.PolicyName]
|
||||
if !pok {
|
||||
_, pokOpenID := claims.Lookup(iamPolicyClaimNameOpenID())
|
||||
_, pokSA := claims.Lookup(iamPolicyClaimNameSA())
|
||||
if !pokOpenID && !pokSA {
|
||||
return nil, errAuthentication
|
||||
}
|
||||
if _, pok = p.(string); !pok {
|
||||
return nil, errAuthentication
|
||||
}
|
||||
sp, spok := claims[iampolicy.SessionPolicyName]
|
||||
// Sub policy is optional, if not set return success.
|
||||
|
||||
sp, spok := claims.Lookup(iampolicy.SessionPolicyName)
|
||||
if !spok {
|
||||
return claims, nil
|
||||
}
|
||||
// Sub policy is set but its not a string, reject such requests
|
||||
spStr, spok := sp.(string)
|
||||
if !spok {
|
||||
return nil, errAuthentication
|
||||
return claims.Map(), nil
|
||||
}
|
||||
// Looks like subpolicy is set and is a string, if set then its
|
||||
// base64 encoded, decode it. Decoding fails reject such requests.
|
||||
spBytes, err := base64.StdEncoding.DecodeString(spStr)
|
||||
spBytes, err := base64.StdEncoding.DecodeString(sp)
|
||||
if err != nil {
|
||||
// Base64 decoding fails, we should log to indicate
|
||||
// something is malforming the request sent by client.
|
||||
logger.LogIf(context.Background(), err)
|
||||
logger.LogIf(r.Context(), err, logger.Application)
|
||||
return nil, errAuthentication
|
||||
}
|
||||
claims[iampolicy.SessionPolicyName] = string(spBytes)
|
||||
claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
|
||||
}
|
||||
return claims, nil
|
||||
|
||||
return claims.Map(), nil
|
||||
}
|
||||
|
||||
// Fetch claims in the security token returned by the client and validate the token.
|
||||
@@ -249,12 +251,15 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
|
||||
if token != "" && cred.AccessKey == "" {
|
||||
return nil, ErrNoAccessKey
|
||||
}
|
||||
if cred.IsServiceAccount() && token == "" {
|
||||
token = cred.SessionToken
|
||||
}
|
||||
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
|
||||
return nil, ErrInvalidToken
|
||||
}
|
||||
claims, err := getClaimsFromToken(r)
|
||||
claims, err := getClaimsFromToken(r, token)
|
||||
if err != nil {
|
||||
return nil, toAPIErrorCode(context.Background(), err)
|
||||
return nil, toAPIErrorCode(r.Context(), err)
|
||||
}
|
||||
return claims, ErrNone
|
||||
}
|
||||
@@ -279,14 +284,14 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
var cred auth.Credentials
|
||||
switch getRequestAuthType(r) {
|
||||
case authTypeUnknown, authTypeStreamingSigned:
|
||||
return accessKey, owner, ErrAccessDenied
|
||||
return accessKey, owner, ErrSignatureVersionNotSupported
|
||||
case authTypePresignedV2, authTypeSignedV2:
|
||||
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
|
||||
return accessKey, owner, s3Err
|
||||
}
|
||||
cred, owner, s3Err = getReqAccessKeyV2(r)
|
||||
case authTypeSigned, authTypePresigned:
|
||||
region := globalServerConfig.GetRegion()
|
||||
region := globalServerRegion
|
||||
switch action {
|
||||
case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
|
||||
region = ""
|
||||
@@ -312,7 +317,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
// To extract region from XML in request body, get copy of request body.
|
||||
payload, err := ioutil.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
logger.LogIf(ctx, err, logger.Application)
|
||||
return accessKey, owner, ErrMalformedXML
|
||||
}
|
||||
|
||||
@@ -334,21 +339,20 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
AccountName: cred.AccessKey,
|
||||
Action: action,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, locationConstraint, ""),
|
||||
ConditionValues: getConditionValues(r, locationConstraint, "", nil),
|
||||
IsOwner: false,
|
||||
ObjectName: objectName,
|
||||
}) {
|
||||
// Request is allowed return the appropriate access key.
|
||||
return cred.AccessKey, owner, ErrNone
|
||||
}
|
||||
return accessKey, owner, ErrAccessDenied
|
||||
return cred.AccessKey, owner, ErrAccessDenied
|
||||
}
|
||||
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: iampolicy.Action(action),
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey),
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
|
||||
ObjectName: objectName,
|
||||
IsOwner: owner,
|
||||
Claims: claims,
|
||||
@@ -356,7 +360,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
// Request is allowed return the appropriate access key.
|
||||
return cred.AccessKey, owner, ErrNone
|
||||
}
|
||||
return accessKey, owner, ErrAccessDenied
|
||||
return cred.AccessKey, owner, ErrAccessDenied
|
||||
}
|
||||
|
||||
// Verify if request has valid AWS Signature Version '2'.
|
||||
@@ -389,12 +393,11 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
|
||||
err error
|
||||
contentMD5, contentSHA256 []byte
|
||||
)
|
||||
|
||||
// Extract 'Content-Md5' if present.
|
||||
if _, ok := r.Header[xhttp.ContentMD5]; ok {
|
||||
contentMD5, err = base64.StdEncoding.Strict().DecodeString(r.Header.Get(xhttp.ContentMD5))
|
||||
if err != nil || len(contentMD5) == 0 {
|
||||
return ErrInvalidDigest
|
||||
}
|
||||
contentMD5, err = checkValidMD5(r.Header)
|
||||
if err != nil {
|
||||
return ErrInvalidDigest
|
||||
}
|
||||
|
||||
// Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned)
|
||||
@@ -420,7 +423,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
|
||||
if err != nil {
|
||||
return toAPIErrorCode(ctx, err)
|
||||
}
|
||||
r.Body = ioutil.NopCloser(reader)
|
||||
r.Body = reader
|
||||
return ErrNone
|
||||
}
|
||||
|
||||
@@ -470,22 +473,122 @@ func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
a.handler.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
writeErrorResponse(context.Background(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
|
||||
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
|
||||
}
|
||||
|
||||
// isPutAllowed - check if PUT operation is allowed on the resource, this
|
||||
func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, map[string]interface{}, APIErrorCode) {
|
||||
var cred auth.Credentials
|
||||
var owner bool
|
||||
var s3Err APIErrorCode
|
||||
switch atype {
|
||||
case authTypeUnknown, authTypeStreamingSigned:
|
||||
return cred, owner, nil, ErrSignatureVersionNotSupported
|
||||
case authTypeSignedV2, authTypePresignedV2:
|
||||
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
|
||||
return cred, owner, nil, s3Err
|
||||
}
|
||||
cred, owner, s3Err = getReqAccessKeyV2(r)
|
||||
case authTypePresigned, authTypeSigned:
|
||||
region := globalServerRegion
|
||||
if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
|
||||
return cred, owner, nil, s3Err
|
||||
}
|
||||
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
|
||||
}
|
||||
if s3Err != ErrNone {
|
||||
return cred, owner, nil, s3Err
|
||||
}
|
||||
|
||||
claims, s3Err := checkClaimsFromToken(r, cred)
|
||||
if s3Err != ErrNone {
|
||||
return cred, owner, nil, s3Err
|
||||
}
|
||||
|
||||
return cred, owner, claims, ErrNone
|
||||
}
|
||||
|
||||
func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate time.Time, retMode objectlock.RetMode, byPassSet bool, r *http.Request, cred auth.Credentials, owner bool, claims map[string]interface{}) (s3Err APIErrorCode) {
|
||||
var retSet bool
|
||||
if cred.AccessKey == "" {
|
||||
conditions := getConditionValues(r, "", "", nil)
|
||||
conditions["object-lock-mode"] = []string{string(retMode)}
|
||||
conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
|
||||
if retDays > 0 {
|
||||
conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
|
||||
}
|
||||
if retMode == objectlock.RetGovernance && byPassSet {
|
||||
byPassSet = globalPolicySys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: policy.Action(policy.BypassGovernanceRetentionAction),
|
||||
BucketName: bucketName,
|
||||
ConditionValues: conditions,
|
||||
IsOwner: false,
|
||||
ObjectName: objectName,
|
||||
})
|
||||
}
|
||||
if globalPolicySys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: policy.Action(policy.PutObjectRetentionAction),
|
||||
BucketName: bucketName,
|
||||
ConditionValues: conditions,
|
||||
IsOwner: false,
|
||||
ObjectName: objectName,
|
||||
}) {
|
||||
retSet = true
|
||||
}
|
||||
if byPassSet || retSet {
|
||||
return ErrNone
|
||||
}
|
||||
return ErrAccessDenied
|
||||
}
|
||||
|
||||
conditions := getConditionValues(r, "", cred.AccessKey, claims)
|
||||
conditions["object-lock-mode"] = []string{string(retMode)}
|
||||
conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
|
||||
if retDays > 0 {
|
||||
conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
|
||||
}
|
||||
if retMode == objectlock.RetGovernance && byPassSet {
|
||||
byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: policy.BypassGovernanceRetentionAction,
|
||||
BucketName: bucketName,
|
||||
ObjectName: objectName,
|
||||
ConditionValues: conditions,
|
||||
IsOwner: owner,
|
||||
Claims: claims,
|
||||
})
|
||||
}
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: policy.PutObjectRetentionAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: conditions,
|
||||
ObjectName: objectName,
|
||||
IsOwner: owner,
|
||||
Claims: claims,
|
||||
}) {
|
||||
retSet = true
|
||||
}
|
||||
if byPassSet || retSet {
|
||||
return ErrNone
|
||||
}
|
||||
return ErrAccessDenied
|
||||
}
|
||||
|
||||
// isPutActionAllowed - check if PUT operation is allowed on the resource, this
|
||||
// call verifies bucket policies and IAM policies, supports multi user
|
||||
// checks etc.
|
||||
func isPutAllowed(atype authType, bucketName, objectName string, r *http.Request) (s3Err APIErrorCode) {
|
||||
func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.Request, action iampolicy.Action) (s3Err APIErrorCode) {
|
||||
var cred auth.Credentials
|
||||
var owner bool
|
||||
switch atype {
|
||||
case authTypeUnknown:
|
||||
return ErrAccessDenied
|
||||
return ErrSignatureVersionNotSupported
|
||||
case authTypeSignedV2, authTypePresignedV2:
|
||||
cred, owner, s3Err = getReqAccessKeyV2(r)
|
||||
case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
|
||||
region := globalServerConfig.GetRegion()
|
||||
region := globalServerRegion
|
||||
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
|
||||
}
|
||||
if s3Err != ErrNone {
|
||||
@@ -497,12 +600,21 @@ func isPutAllowed(atype authType, bucketName, objectName string, r *http.Request
|
||||
return s3Err
|
||||
}
|
||||
|
||||
// Do not check for PutObjectRetentionAction permission,
|
||||
// if mode and retain until date are not set.
|
||||
// Can happen when bucket has default lock config set
|
||||
if action == iampolicy.PutObjectRetentionAction &&
|
||||
r.Header.Get(xhttp.AmzObjectLockMode) == "" &&
|
||||
r.Header.Get(xhttp.AmzObjectLockRetainUntilDate) == "" {
|
||||
return ErrNone
|
||||
}
|
||||
|
||||
if cred.AccessKey == "" {
|
||||
if globalPolicySys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: policy.PutObjectAction,
|
||||
Action: policy.Action(action),
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, "", ""),
|
||||
ConditionValues: getConditionValues(r, "", "", nil),
|
||||
IsOwner: false,
|
||||
ObjectName: objectName,
|
||||
}) {
|
||||
@@ -513,9 +625,9 @@ func isPutAllowed(atype authType, bucketName, objectName string, r *http.Request
|
||||
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: policy.PutObjectAction,
|
||||
Action: action,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey),
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
|
||||
ObjectName: objectName,
|
||||
IsOwner: owner,
|
||||
Claims: claims,
|
||||
|
||||
@@ -28,6 +28,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
)
|
||||
|
||||
// Test get request auth type.
|
||||
@@ -273,7 +274,7 @@ func mustNewRequest(method string, urlStr string, contentLength int64, body io.R
|
||||
// is signed with AWS Signature V4, fails if not able to do so.
|
||||
func mustNewSignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
|
||||
req := mustNewRequest(method, urlStr, contentLength, body, t)
|
||||
cred := globalServerConfig.GetCredential()
|
||||
cred := globalActiveCred
|
||||
if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
|
||||
t.Fatalf("Unable to inititalized new signed http request %s", err)
|
||||
}
|
||||
@@ -284,7 +285,7 @@ func mustNewSignedRequest(method string, urlStr string, contentLength int64, bod
|
||||
// is signed with AWS Signature V2, fails if not able to do so.
|
||||
func mustNewSignedV2Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
|
||||
req := mustNewRequest(method, urlStr, contentLength, body, t)
|
||||
cred := globalServerConfig.GetCredential()
|
||||
cred := globalActiveCred
|
||||
if err := signRequestV2(req, cred.AccessKey, cred.SecretKey); err != nil {
|
||||
t.Fatalf("Unable to inititalized new signed http request %s", err)
|
||||
}
|
||||
@@ -295,7 +296,7 @@ func mustNewSignedV2Request(method string, urlStr string, contentLength int64, b
|
||||
// is presigned with AWS Signature V2, fails if not able to do so.
|
||||
func mustNewPresignedV2Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
|
||||
req := mustNewRequest(method, urlStr, contentLength, body, t)
|
||||
cred := globalServerConfig.GetCredential()
|
||||
cred := globalActiveCred
|
||||
if err := preSignV2(req, cred.AccessKey, cred.SecretKey, time.Now().Add(10*time.Minute).Unix()); err != nil {
|
||||
t.Fatalf("Unable to inititalized new signed http request %s", err)
|
||||
}
|
||||
@@ -306,7 +307,7 @@ func mustNewPresignedV2Request(method string, urlStr string, contentLength int64
|
||||
// is presigned with AWS Signature V4, fails if not able to do so.
|
||||
func mustNewPresignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
|
||||
req := mustNewRequest(method, urlStr, contentLength, body, t)
|
||||
cred := globalServerConfig.GetCredential()
|
||||
cred := globalActiveCred
|
||||
if err := preSignV4(req, cred.AccessKey, cred.SecretKey, time.Now().Add(10*time.Minute).Unix()); err != nil {
|
||||
t.Fatalf("Unable to inititalized new signed http request %s", err)
|
||||
}
|
||||
@@ -316,7 +317,7 @@ func mustNewPresignedRequest(method string, urlStr string, contentLength int64,
|
||||
func mustNewSignedShortMD5Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
|
||||
req := mustNewRequest(method, urlStr, contentLength, body, t)
|
||||
req.Header.Set("Content-Md5", "invalid-digest")
|
||||
cred := globalServerConfig.GetCredential()
|
||||
cred := globalActiveCred
|
||||
if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
|
||||
t.Fatalf("Unable to initialized new signed http request %s", err)
|
||||
}
|
||||
@@ -326,17 +327,18 @@ func mustNewSignedShortMD5Request(method string, urlStr string, contentLength in
|
||||
func mustNewSignedEmptyMD5Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
|
||||
req := mustNewRequest(method, urlStr, contentLength, body, t)
|
||||
req.Header.Set("Content-Md5", "")
|
||||
cred := globalServerConfig.GetCredential()
|
||||
cred := globalActiveCred
|
||||
if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
|
||||
t.Fatalf("Unable to initialized new signed http request %s", err)
|
||||
}
|
||||
return req
|
||||
}
|
||||
|
||||
func mustNewSignedBadMD5Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
|
||||
func mustNewSignedBadMD5Request(method string, urlStr string, contentLength int64,
|
||||
body io.ReadSeeker, t *testing.T) *http.Request {
|
||||
req := mustNewRequest(method, urlStr, contentLength, body, t)
|
||||
req.Header.Set("Content-Md5", "YWFhYWFhYWFhYWFhYWFhCg==")
|
||||
cred := globalServerConfig.GetCredential()
|
||||
cred := globalActiveCred
|
||||
if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
|
||||
t.Fatalf("Unable to initialized new signed http request %s", err)
|
||||
}
|
||||
@@ -359,7 +361,7 @@ func TestIsReqAuthenticated(t *testing.T) {
|
||||
t.Fatalf("unable create credential, %s", err)
|
||||
}
|
||||
|
||||
globalServerConfig.SetCredential(creds)
|
||||
globalActiveCred = creds
|
||||
|
||||
// List of test cases for validating http request authentication.
|
||||
testCases := []struct {
|
||||
@@ -381,7 +383,7 @@ func TestIsReqAuthenticated(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
// Validates all testcases.
|
||||
for i, testCase := range testCases {
|
||||
s3Error := isReqAuthenticated(ctx, testCase.req, globalServerConfig.GetRegion(), serviceS3)
|
||||
s3Error := isReqAuthenticated(ctx, testCase.req, globalServerRegion, serviceS3)
|
||||
if s3Error != testCase.s3Error {
|
||||
if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
|
||||
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
|
||||
@@ -389,6 +391,7 @@ func TestIsReqAuthenticated(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckAdminRequestAuthType(t *testing.T) {
|
||||
objLayer, fsDir, err := prepareFS()
|
||||
if err != nil {
|
||||
@@ -405,7 +408,7 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
|
||||
t.Fatalf("unable create credential, %s", err)
|
||||
}
|
||||
|
||||
globalServerConfig.SetCredential(creds)
|
||||
globalActiveCred = creds
|
||||
testCases := []struct {
|
||||
Request *http.Request
|
||||
ErrCode APIErrorCode
|
||||
@@ -418,8 +421,53 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
|
||||
}
|
||||
ctx := context.Background()
|
||||
for i, testCase := range testCases {
|
||||
if s3Error := checkAdminRequestAuthType(ctx, testCase.Request, globalServerConfig.GetRegion()); s3Error != testCase.ErrCode {
|
||||
if _, s3Error := checkAdminRequestAuthType(ctx, testCase.Request, iampolicy.AllAdminActions, globalServerRegion); s3Error != testCase.ErrCode {
|
||||
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateAdminSignature(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
objLayer, fsDir, err := prepareFS()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(fsDir)
|
||||
|
||||
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
|
||||
t.Fatalf("unable initialize config file, %s", err)
|
||||
}
|
||||
|
||||
creds, err := auth.CreateCredentials("admin", "mypassword")
|
||||
if err != nil {
|
||||
t.Fatalf("unable create credential, %s", err)
|
||||
}
|
||||
globalActiveCred = creds
|
||||
|
||||
testCases := []struct {
|
||||
AccessKey string
|
||||
SecretKey string
|
||||
ErrCode APIErrorCode
|
||||
}{
|
||||
{"", "", ErrInvalidAccessKeyID},
|
||||
{"admin", "", ErrSignatureDoesNotMatch},
|
||||
{"admin", "wrongpassword", ErrSignatureDoesNotMatch},
|
||||
{"wronguser", "mypassword", ErrInvalidAccessKeyID},
|
||||
{"", "mypassword", ErrInvalidAccessKeyID},
|
||||
{"admin", "mypassword", ErrNone},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
req := mustNewRequest("GET", "http://localhost:9000/", 0, nil, t)
|
||||
if err := signRequestV4(req, testCase.AccessKey, testCase.SecretKey); err != nil {
|
||||
t.Fatalf("Unable to inititalized new signed http request %s", err)
|
||||
}
|
||||
_, _, _, s3Error := validateAdminSignature(ctx, req, globalMinioDefaultRegion)
|
||||
if s3Error != testCase.ErrCode {
|
||||
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i+1, testCase.ErrCode, s3Error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -52,41 +52,49 @@ func (h *healRoutine) queueHealTask(task healTask) {
|
||||
h.tasks <- task
|
||||
}
|
||||
|
||||
func waitForLowHTTPReq(tolerance int32) {
|
||||
if httpServer := newHTTPServerFn(); httpServer != nil {
|
||||
// Wait at max 10 minute for an inprogress request before proceeding to heal
|
||||
waitCount := 600
|
||||
// Any requests in progress, delay the heal.
|
||||
for (httpServer.GetRequestCount() >= tolerance) &&
|
||||
waitCount > 0 {
|
||||
waitCount--
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for heal requests and process them
|
||||
func (h *healRoutine) run() {
|
||||
ctx := context.Background()
|
||||
func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
|
||||
for {
|
||||
select {
|
||||
case task, ok := <-h.tasks:
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
if globalHTTPServer != nil {
|
||||
// Wait at max 10 minute for an inprogress request before proceeding to heal
|
||||
waitCount := 600
|
||||
// Any requests in progress, delay the heal.
|
||||
for (globalHTTPServer.GetRequestCount() >= int32(globalXLSetCount*globalXLSetDriveCount)) &&
|
||||
waitCount > 0 {
|
||||
waitCount--
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
// Wait and proceed if there are active requests
|
||||
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()))
|
||||
|
||||
var res madmin.HealResultItem
|
||||
var err error
|
||||
bucket, object := urlPath2BucketObjectName(task.path)
|
||||
bucket, object := path2BucketObject(task.path)
|
||||
switch {
|
||||
case bucket == "" && object == "":
|
||||
res, err = bgHealDiskFormat(ctx, task.opts)
|
||||
res, err = healDiskFormat(ctx, objAPI, task.opts)
|
||||
case bucket != "" && object == "":
|
||||
res, err = bgHealBucket(ctx, bucket, task.opts)
|
||||
res, err = objAPI.HealBucket(ctx, bucket, task.opts.DryRun, task.opts.Remove)
|
||||
case bucket != "" && object != "":
|
||||
res, err = bgHealObject(ctx, bucket, object, task.opts)
|
||||
res, err = objAPI.HealObject(ctx, bucket, object, task.opts)
|
||||
}
|
||||
if task.path != slashSeparator && task.path != nopHeal {
|
||||
ObjectPathUpdated(task.path)
|
||||
}
|
||||
task.responseCh <- healResult{result: res, err: err}
|
||||
case <-h.doneCh:
|
||||
return
|
||||
case <-GlobalServiceDoneCh:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -100,23 +108,28 @@ func initHealRoutine() *healRoutine {
|
||||
|
||||
}
|
||||
|
||||
func initBackgroundHealing() {
|
||||
healBg := initHealRoutine()
|
||||
go healBg.run()
|
||||
func startBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
|
||||
// Run the background healer
|
||||
globalBackgroundHealRoutine = initHealRoutine()
|
||||
go globalBackgroundHealRoutine.run(ctx, objAPI)
|
||||
|
||||
globalBackgroundHealing = healBg
|
||||
// Launch the background healer sequence to track
|
||||
// background healing operations, ignore errors
|
||||
// errors are handled into offline disks already.
|
||||
info, _ := objAPI.StorageInfo(ctx, false)
|
||||
numDisks := info.Backend.OnlineDisks.Sum() + info.Backend.OfflineDisks.Sum()
|
||||
nh := newBgHealSequence(numDisks)
|
||||
globalBackgroundHealState.LaunchNewHealSequence(nh)
|
||||
}
|
||||
|
||||
// bgHealDiskFormat - heals format.json, return value indicates if a
|
||||
// failure error occurred.
|
||||
func bgHealDiskFormat(ctx context.Context, opts madmin.HealOpts) (madmin.HealResultItem, error) {
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil {
|
||||
return madmin.HealResultItem{}, errServerNotInitialized
|
||||
}
|
||||
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
|
||||
go startBackgroundHealing(ctx, objAPI)
|
||||
}
|
||||
|
||||
res, err := objectAPI.HealFormat(ctx, opts.DryRun)
|
||||
// healDiskFormat - heals format.json, return value indicates if a
|
||||
// failure error occurred.
|
||||
func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpts) (madmin.HealResultItem, error) {
|
||||
res, err := objAPI.HealFormat(ctx, opts.DryRun)
|
||||
|
||||
// return any error, ignore error returned when disks have
|
||||
// already healed.
|
||||
@@ -137,24 +150,3 @@ func bgHealDiskFormat(ctx context.Context, opts madmin.HealOpts) (madmin.HealRes
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// bghealBucket - traverses and heals given bucket
|
||||
func bgHealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil {
|
||||
return madmin.HealResultItem{}, errServerNotInitialized
|
||||
}
|
||||
|
||||
return objectAPI.HealBucket(ctx, bucket, opts.DryRun, opts.Remove)
|
||||
}
|
||||
|
||||
// bgHealObject - heal the given object and record result
|
||||
func bgHealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil {
|
||||
return madmin.HealResultItem{}, errServerNotInitialized
|
||||
}
|
||||
return objectAPI.HealObject(ctx, bucket, object, opts.DryRun, opts.Remove, opts.ScanMode)
|
||||
}
|
||||
|
||||
126
cmd/background-newdisks-heal-ops.go
Normal file
126
cmd/background-newdisks-heal-ops.go
Normal file
@@ -0,0 +1,126 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
)
|
||||
|
||||
const defaultMonitorNewDiskInterval = time.Minute * 10
|
||||
|
||||
func initLocalDisksAutoHeal(ctx context.Context, objAPI ObjectLayer) {
|
||||
go monitorLocalDisksAndHeal(ctx, objAPI)
|
||||
}
|
||||
|
||||
// monitorLocalDisksAndHeal - ensures that detected new disks are healed
|
||||
// 1. Only the concerned erasure set will be listed and healed
|
||||
// 2. Only the node hosting the disk is responsible to perform the heal
|
||||
func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
|
||||
z, ok := objAPI.(*xlZones)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
var bgSeq *healSequence
|
||||
var found bool
|
||||
|
||||
for {
|
||||
bgSeq, found = globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
|
||||
if found {
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
|
||||
// Perform automatic disk healing when a disk is replaced locally.
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-time.After(defaultMonitorNewDiskInterval):
|
||||
// Attempt a heal as the server starts-up first.
|
||||
localDisksInZoneHeal := make([]Endpoints, len(z.zones))
|
||||
var healNewDisks bool
|
||||
for i, ep := range globalEndpoints {
|
||||
localDisksToHeal := Endpoints{}
|
||||
for _, endpoint := range ep.Endpoints {
|
||||
if !endpoint.IsLocal {
|
||||
continue
|
||||
}
|
||||
// Try to connect to the current endpoint
|
||||
// and reformat if the current disk is not formatted
|
||||
_, _, err := connectEndpoint(endpoint)
|
||||
if err == errUnformattedDisk {
|
||||
localDisksToHeal = append(localDisksToHeal, endpoint)
|
||||
}
|
||||
}
|
||||
if len(localDisksToHeal) == 0 {
|
||||
continue
|
||||
}
|
||||
localDisksInZoneHeal[i] = localDisksToHeal
|
||||
healNewDisks = true
|
||||
}
|
||||
|
||||
// Reformat disks only if needed.
|
||||
if !healNewDisks {
|
||||
continue
|
||||
}
|
||||
|
||||
// Reformat disks
|
||||
bgSeq.sourceCh <- healSource{path: SlashSeparator}
|
||||
|
||||
// Ensure that reformatting disks is finished
|
||||
bgSeq.sourceCh <- healSource{path: nopHeal}
|
||||
|
||||
var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
|
||||
// Compute the list of erasure set to heal
|
||||
for i, localDisksToHeal := range localDisksInZoneHeal {
|
||||
var erasureSetToHeal []int
|
||||
for _, endpoint := range localDisksToHeal {
|
||||
// Load the new format of this passed endpoint
|
||||
_, format, err := connectEndpoint(endpoint)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
// Calculate the set index where the current endpoint belongs
|
||||
setIndex, _, err := findDiskIndex(z.zones[i].format, format)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
|
||||
erasureSetToHeal = append(erasureSetToHeal, setIndex)
|
||||
}
|
||||
erasureSetInZoneToHeal[i] = erasureSetToHeal
|
||||
}
|
||||
|
||||
// Heal all erasure sets that need
|
||||
for i, erasureSetToHeal := range erasureSetInZoneToHeal {
|
||||
for _, setIndex := range erasureSetToHeal {
|
||||
err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex])
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -28,11 +28,6 @@ import (
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
)
|
||||
|
||||
// Prepare XL/FS backend for benchmark.
|
||||
func prepareBenchmarkBackend(instanceType string) (ObjectLayer, []string, error) {
|
||||
return prepareTestBackend(instanceType)
|
||||
}
|
||||
|
||||
// Benchmark utility functions for ObjectLayer.PutObject().
|
||||
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
|
||||
func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
@@ -40,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
// obtains random bucket name.
|
||||
bucket := getRandomBucketName()
|
||||
// create bucket.
|
||||
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
|
||||
err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -81,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
|
||||
object := getRandomObjectName()
|
||||
|
||||
// create bucket.
|
||||
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
|
||||
err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -135,7 +130,9 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
|
||||
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
|
||||
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
|
||||
// create a temp XL/FS backend.
|
||||
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
|
||||
}
|
||||
@@ -149,7 +146,9 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
|
||||
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
|
||||
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
|
||||
// create a temp XL/FS backend.
|
||||
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
|
||||
}
|
||||
@@ -163,7 +162,9 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
|
||||
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
|
||||
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
|
||||
// create a temp XL/FS backend.
|
||||
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
|
||||
}
|
||||
@@ -180,7 +181,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
// obtains random bucket name.
|
||||
bucket := getRandomBucketName()
|
||||
// create bucket.
|
||||
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
|
||||
err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -242,7 +243,9 @@ func generateBytesData(size int) []byte {
|
||||
// creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
|
||||
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
|
||||
// create a temp XL/FS backend.
|
||||
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
|
||||
}
|
||||
@@ -256,7 +259,9 @@ func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
|
||||
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
|
||||
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
|
||||
// create a temp XL/FS backend.
|
||||
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
|
||||
}
|
||||
@@ -273,7 +278,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
// obtains random bucket name.
|
||||
bucket := getRandomBucketName()
|
||||
// create bucket.
|
||||
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
|
||||
err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -317,7 +322,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
// obtains random bucket name.
|
||||
bucket := getRandomBucketName()
|
||||
// create bucket.
|
||||
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
|
||||
err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -18,14 +18,22 @@ package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
)
|
||||
|
||||
type errHashMismatch struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func (err *errHashMismatch) Error() string {
|
||||
return err.message
|
||||
}
|
||||
|
||||
// Calculates bitrot in chunks and writes the hash into the stream.
|
||||
type streamingBitrotWriter struct {
|
||||
iow *io.PipeWriter
|
||||
@@ -131,8 +139,9 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
|
||||
b.h.Write(buf)
|
||||
|
||||
if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
|
||||
err = HashMismatchError{hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))}
|
||||
logger.LogIf(context.Background(), err)
|
||||
err := &errHashMismatch{fmt.Sprintf("hashes do not match expected %s, got %s",
|
||||
hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
|
||||
logger.LogIf(GlobalContext, err)
|
||||
return 0, err
|
||||
}
|
||||
b.currOffset += int64(len(buf))
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user