Compare commits

RELEASE.20… … RELEASE.20…: 1,139 commits

[Commit table: 1,139 abbreviated commit SHA-1s, from c606c76323 through 667f42515a; the Author and Date columns are empty in this capture.]
.github/ISSUE_TEMPLATE.md (17 changes, vendored)

@@ -1,3 +1,12 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: community, triage
assignees: ''

---

<!--- Provide a general summary of the issue in the Title above -->

## Expected Behavior
@@ -15,6 +24,8 @@
## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
<!--- and make sure you have followed https://github.com/minio/minio/tree/release/docs/debugging to capture relevant logs -->

1.
2.
3.
@@ -30,8 +41,6 @@

## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version used (`minio version`):
* Environment name and version (e.g. nginx 1.9.1):
* Server type and version:
* Version used (`minio --version`):
* Server setup and configuration:
* Operating System and version (`uname -a`):
* Link to your project:
.github/ISSUE_TEMPLATE/bug_report.md (8 changes, vendored)

@@ -24,6 +24,8 @@ assignees: ''
## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
<!--- and make sure you have followed https://github.com/minio/minio/tree/release/docs/debugging to capture relevant logs -->

1.
2.
3.
@@ -39,8 +41,6 @@ assignees: ''

## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version used (`minio version`):
* Environment name and version (e.g. nginx 1.9.1):
* Server type and version:
* Version used (`minio --version`):
* Server setup and configuration:
* Operating System and version (`uname -a`):
* Link to your project:
.github/ISSUE_TEMPLATE/config.yml (8 changes, new file, vendored)

@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
  - name: MinIO Community Support
    url: https://slack.min.io
    about: Join here for Community Support
  - name: MinIO SUBNET Support
    url: https://min.io/pricing
    about: Join here for Enterprise Support
.github/PULL_REQUEST_TEMPLATE.md (1 change, vendored)

@@ -16,4 +16,3 @@
- [ ] Fixes a regression (If yes, please add `commit-id` or `PR #` here)
- [ ] Documentation needed
- [ ] Unit tests needed
- [ ] Functional tests needed (If yes, add [mint](https://github.com/minio/mint) PR # here: )
.github/lock.yml (39 changes, new file, vendored)

@@ -0,0 +1,39 @@
# Configuration for Lock Threads - https://github.com/dessant/lock-threads-app

# Number of days of inactivity before a closed issue or pull request is locked
daysUntilLock: 365

# Skip issues and pull requests created before a given timestamp. Timestamp must
# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
skipCreatedBefore: false

# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
exemptLabels: []

# Label to add before locking, such as `outdated`. Set to `false` to disable
lockLabel: true

# Comment to post before locking. Set to `false` to disable
lockComment: >-

  This thread has been automatically locked since there has not been
  any recent activity after it was closed. Please open a new issue for
  related bugs.

# Assign `resolved` as the reason for locking. Set to `false` to disable
setLockReason: true

# Limit to only `issues` or `pulls`
only: issues

# Optionally, specify configuration settings just for `issues` or `pulls`
# issues:
#   exemptLabels:
#     - help-wanted
#   lockLabel: outdated

# pulls:
#   daysUntilLock: 30

# Repository to extend settings from
# _extends: repo
.github/stale.yml (60 changes, new file, vendored)

@@ -0,0 +1,60 @@
# Configuration for probot-stale - https://github.com/probot/stale

# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 30

# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: 15

# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
onlyLabels: []

# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
  - "security"
  - "pending discussion"
  - "do not close"

# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false

# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: false

# Set to true to ignore issues with an assignee (defaults to false)
exemptAssignees: false

# Label to use when marking as stale
staleLabel: stale

# Comment to post when marking as stale. Set to `false` to disable
markComment: >-
  This issue has been automatically marked as stale because it has not had
  recent activity. It will be closed after 15 days if no further activity
  occurs. Thank you for your contributions.
# Comment to post when removing the stale label.
# unmarkComment: >
#   Your comment here.

# Comment to post when closing a stale Issue or Pull Request.
# closeComment: >
#   Your comment here.

# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 1

# Limit to only `issues` or `pulls`
# only: issues

# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
# pulls:
#   daysUntilStale: 30
#   markComment: >
#     This pull request has been automatically marked as stale because it has not had
#     recent activity. It will be closed if no further activity occurs. Thank you
#     for your contributions.

# issues:
#   exemptLabels:
#     - confirmed
.github/workflows/go.yml (52 changes, new file, vendored)

@@ -0,0 +1,52 @@
name: Go

on:
  pull_request:
    branches:
      - master

jobs:
  build:
    name: Test on Go ${{ matrix.go-version }} and ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go-version: [1.14.x, 1.15.x]
        os: [ubuntu-latest, windows-latest]
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-node@v1
        with:
          node-version: '12'
      - uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - name: Build on ${{ matrix.os }}
        if: matrix.os == 'windows-latest'
        env:
          CGO_ENABLED: 0
          GO111MODULE: on
        run: |
          go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
          go test -v --timeout 50m ./...
      - name: Build on ${{ matrix.os }}
        if: matrix.os == 'ubuntu-latest'
        env:
          CGO_ENABLED: 0
          GO111MODULE: on
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          sudo apt-get install devscripts shellcheck
          nancy_version=$(curl --retry 10 -Ls -o /dev/null -w "%{url_effective}" https://github.com/sonatype-nexus-community/nancy/releases/latest | sed "s/https:\/\/github.com\/sonatype-nexus-community\/nancy\/releases\/tag\///")
          curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-linux.amd64-${nancy_version} && chmod +x nancy
          go list -m all | ./nancy sleuth
          make
          diff -au <(gofmt -s -d cmd) <(printf "")
          diff -au <(gofmt -s -d pkg) <(printf "")
          make test-race
          make crosscompile
          make verify
          make verify-healing
          cd browser && npm install && npm run test && cd ..
          bash -c 'shopt -s globstar; shellcheck mint/**/*.sh'
.golangci.yml (33 changes, new file)

@@ -0,0 +1,33 @@
linters-settings:
  golint:
    min-confidence: 0

  misspell:
    locale: US

linters:
  disable-all: true
  enable:
    - typecheck
    - goimports
    - misspell
    - govet
    - golint
    - ineffassign
    - gosimple
    - deadcode
    - structcheck

issues:
  exclude-use-default: false
  exclude:
    - should have a package comment
    - error strings should not be capitalized or end with punctuation or a newline

run:
  skip-dirs:
    - pkg/rpc
    - pkg/argon2

service:
  golangci-lint-version: 1.20.0 # use the fixed version to not introduce new linters unexpectedly
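For reference, the Makefile change later in this diff drives this configuration with the following two commands (copied from the new `lint` target):

```sh
golangci-lint cache clean
golangci-lint run --timeout=5m --config ./.golangci.yml
```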
.goreleaser.yml (167 changes, new file)

@@ -0,0 +1,167 @@
project_name: minio

release:
  name_template: "Version {{.MinIO.Version}}"
  disable: true
  github:
    owner: minio
    name: minio

env:
  - CGO_ENABLED=0
  - GO111MODULE=on

before:
  hooks:
    - make clean
    - go generate ./...
    - go mod tidy
    - go mod download

builds:
  -
    goos:
      - linux
      - darwin
      - windows
      - freebsd

    goarch:
      - amd64
      - arm64
      - arm
      - ppc64le
      - s390x

    goarm:
      - 7

    ignore:
      - goos: darwin
        goarch: arm64
      - goos: darwin
        goarch: arm
      - goos: darwin
        goarch: ppc64le
      - goos: darwin
        goarch: s390x
      - goos: windows
        goarch: arm64
      - goos: windows
        goarch: arm
      - goos: windows
        goarch: ppc64le
      - goos: windows
        goarch: s390x
      - goos: freebsd
        goarch: arm
      - goos: freebsd
        goarch: arm64
      - goos: freebsd
        goarch: ppc64le
      - goos: freebsd
        goarch: s390x

    flags:
      - -tags=kqueue
      - -trimpath

    ldflags:
      - "-s -w -X github.com/minio/minio/cmd.Version={{.Version}} -X github.com/minio/minio/cmd.ReleaseTag={{.Tag}} -X github.com/minio/minio/cmd.CommitID={{.FullCommit}} -X github.com/minio/minio/cmd.ShortCommitID={{.ShortCommit}}"

archives:
  -
    format: binary
    name_template: "{{ .Binary }}-release/{{ .Os }}-{{ .Arch }}/{{ .Binary }}.{{ .Version }}"

nfpms:
  -
    id: minio
    package_name: minio
    vendor: MinIO, Inc.
    homepage: https://min.io/
    maintainer: dev@min.io
    description: MinIO is a High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
    license: Apache 2.0
    bindir: /usr/bin
    formats:
      - deb
      - rpm
    overrides:
      deb:
        file_name_template: "{{ .Binary }}-release/debs/{{ .ProjectName }}-{{ .Version }}_{{ .Arch }}"
        replacements:
          arm: armv7
        files:
          "NOTICE": "/usr/share/minio/NOTICE"
          "CREDITS": "/usr/share/minio/CREDITS"
          "LICENSE": "/usr/share/minio/LICENSE"
          "README.md": "/usr/share/minio/README.md"
      rpm:
        file_name_template: "{{ .Binary }}-release/rpms/{{ .ProjectName }}-{{ .Version }}.{{ .Arch }}"
        replacements:
          amd64: x86_64
          arm64: aarch64
          arm: armv7
        files:
          "NOTICE": "/usr/share/minio/NOTICE"
          "CREDITS": "/usr/share/minio/CREDITS"
          "LICENSE": "/usr/share/minio/LICENSE"
          "README.md": "/usr/share/minio/README.md"

checksum:
  algorithm: sha256

signs:
  -
    signature: "${artifact}.minisig"
    cmd: "sh"
    args:
      - '-c'
      - 'minisign -s /media/${USER}/minio/minisign.key -qQSm ${artifact} < /media/${USER}/minio/minisign-passphrase'
    artifacts: all

changelog:
  sort: asc
  filters:
    exclude:
      - '^Update yaml files'

dockers:
  -
    goos: linux
    goarch: amd64
    dockerfile: Dockerfile.release
    image_templates:
      - minio/minio:{{ .Tag }}
      - minio/minio:latest

  -
    goos: linux
    goarch: ppc64le
    dockerfile: Dockerfile.ppc64le.release
    image_templates:
      - minio/minio:{{ .Tag }}-ppc64le

  -
    goos: linux
    goarch: s390x
    dockerfile: Dockerfile.s390x.release
    image_templates:
      - minio/minio:{{ .Tag }}-s390x

  -
    goos: linux
    goarch: arm64
    goarm: ''
    dockerfile: Dockerfile.arm64.release
    image_templates:
      - minio/minio:{{ .Tag }}-arm64

  -
    goos: linux
    goarch: arm
    goarm: '7'
    dockerfile: Dockerfile.arm.release
    image_templates:
      - minio/minio:{{ .Tag }}-arm
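A rough sketch of exercising this configuration locally without publishing anything; the flags below are assumptions based on goreleaser releases contemporary with this file, and `disable: true` in the `release:` section above already keeps GitHub publishing off:

```sh
# Build a local snapshot of every target defined above; nothing is uploaded.
goreleaser release --snapshot --skip-publish --rm-dist
```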
.nancy-ignore (0 changes, new empty file)
.travis.yml (58 changes, deleted)

@@ -1,58 +0,0 @@
go_import_path: github.com/minio/minio

language: go

addons:
  apt:
    packages:
      - shellcheck

services:
  - docker

# this ensures PRs based on a local branch are not built twice
# the downside is that a PR targeting a different branch is not built
# but as a workaround you can add the branch to this list
branches:
  only:
    - master

matrix:
  include:
    - os: linux
      dist: trusty
      sudo: required
      env:
        - ARCH=x86_64
        - CGO_ENABLED=0
        - GO111MODULE=on
        - SIMPLE_CI=1
      go: 1.13.x
      script:
        - make
        - diff -au <(gofmt -s -d cmd) <(printf "")
        - diff -au <(gofmt -s -d pkg) <(printf "")
        - make test-race
        - make crosscompile
        - make verify
        - cd browser && npm install && npm run test && cd ..
        - bash -c 'shopt -s globstar; shellcheck mint/**/*.sh'

    - os: windows
      env:
        - ARCH=x86_64
        - CGO_ENABLED=0
        - GO111MODULE=on
        - SIMPLE_CI=1
      go: 1.13.x
      script:
        - go build --ldflags="$(go run buildscripts/gen-ldflags.go)" -o %GOPATH%\bin\minio.exe
        - for d in $(go list ./... | grep -v browser); do go test -v --timeout 20m "$d"; done

before_script:
  # Add an IPv6 config - see the corresponding Travis issue
  # https://github.com/travis-ci/travis-ci/issues/8361
  - if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6'; fi

before_install:
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then nvm install stable ; fi
@@ -1,4 +1,4 @@
# MinIO Contribution Guide [](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/)
# MinIO Contribution Guide [](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/)

``MinIO`` community welcomes your contribution. To make the process as seamless as possible, we recommend you read this contribution guide.
Dockerfile (21 changes)

@@ -1,4 +1,4 @@
FROM golang:1.13-alpine
FROM golang:1.15-alpine as builder

LABEL maintainer="MinIO Inc <dev@min.io>"

@@ -9,24 +9,27 @@ ENV GO111MODULE on
RUN \
    apk add --no-cache git && \
    git clone https://github.com/minio/minio && cd minio && \
    go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"
    git checkout master && go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"

FROM alpine:3.10
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"

EXPOSE 9000

COPY --from=0 /go/bin/minio /usr/bin/minio
COPY CREDITS /third_party/
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY --from=builder /go/bin/minio /usr/bin/minio
COPY --from=builder /go/minio/CREDITS /licenses/CREDITS
COPY --from=builder /go/minio/LICENSE /licenses/LICENSE
COPY --from=builder /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    microdnf update --nodocs && \
    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
    microdnf clean all && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
Dockerfile.cicd (40 changes, new file)

@@ -0,0 +1,40 @@
FROM golang:1.15-alpine as builder

LABEL maintainer="MinIO Inc <dev@min.io>"

ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on

RUN \
    apk add --no-cache git && \
    git clone https://github.com/minio/minio && cd minio && \
    git checkout master && go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"

FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3

ARG TARGETARCH

ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"

EXPOSE 9000

COPY --from=builder /go/bin/minio /usr/bin/minio
COPY --from=builder /go/minio/CREDITS /licenses/CREDITS
COPY --from=builder /go/minio/LICENSE /licenses/LICENSE
COPY --from=builder /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/

RUN \
    microdnf update --nodocs && \
    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
    microdnf clean all

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio", "server", "/data"]
@@ -1,22 +1,26 @@
FROM alpine:3.10
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3

ARG TARGETARCH

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY minio /usr/bin/
COPY CREDITS /third_party/
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
ENV MINIO_UPDATE=off \
    MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh
RUN \
    microdnf update --nodocs && \
    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
    microdnf clean all && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh

EXPOSE 9000
Dockerfile.dev.browser (12 changes, new file)

@@ -0,0 +1,12 @@
FROM ubuntu:20.04

LABEL maintainer="MinIO Inc <dev@min.io>"

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends --no-install-suggests \
    git golang make npm && \
    apt-get clean && rm -rf /var/lib/apt/lists/*

ENV PATH=$PATH:/root/go/bin

RUN go get github.com/go-bindata/go-bindata/go-bindata && \
    go get github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs
@@ -1,17 +1,11 @@
FROM ubuntu:16.04
FROM ubuntu:18.04

ENV DEBIAN_FRONTEND noninteractive

ENV LANG C.UTF-8

ENV GOROOT /usr/local/go

ENV GOPATH /usr/local

ENV GOPATH /usr/local/gopath
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH

ENV MINT_ROOT_DIR /mint

COPY mint /mint

RUN apt-get --yes update && apt-get --yes upgrade && \
@@ -1,32 +1,39 @@
FROM golang:1.13-alpine
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3

ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on
ARG TARGETARCH

RUN \
    apk add --no-cache git && \
    git clone https://github.com/minio/minio
LABEL name="MinIO" \
    vendor="MinIO Inc <dev@min.io>" \
    maintainer="MinIO Inc <dev@min.io>" \
    version="RELEASE.2020-11-25T22-36-25Z" \
    release="RELEASE.2020-11-25T22-36-25Z" \
    summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
    description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."

FROM alpine:3.10

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY CREDITS /third_party/

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"

COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl https://dl.min.io/server/minio/release/linux-amd64/minio > /usr/bin/minio && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh
    microdnf update --nodocs && \
    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
    rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
    microdnf install minisign --nodocs && \
    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio -o /usr/bin/minio && \
    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.sha256sum -o /usr/bin/minio.sha256sum && \
    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.minisig -o /usr/bin/minio.minisig && \
    microdnf clean all && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/verify-minio.sh && \
    /usr/bin/verify-minio.sh

EXPOSE 9000
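The `verify-minio.sh` script copied above is not shown in this diff, but the downloaded `.minisig` file and the `MINIO_UPDATE_MINISIGN_PUBKEY` value suggest a minisign check along these lines (a sketch of the manual equivalent, not the script's actual contents):

```sh
# Verify the downloaded binary against its detached minisign signature and
# the public key baked into the image environment above.
minisign -Vm /usr/bin/minio -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
```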
@@ -1,80 +0,0 @@
#-------------------------------------------------------------
# Stage 1: Build and Unit tests
#-------------------------------------------------------------
FROM golang:1.13

COPY . /go/src/github.com/minio/minio
WORKDIR /go/src/github.com/minio/minio

RUN apt-get update && apt-get install -y jq
ENV GO111MODULE=on
ENV SIMPLE_CI 1

RUN git config --global http.cookiefile /gitcookie/.gitcookie

RUN apt-get update && \
    apt-get -y install sudo
RUN touch /etc/sudoers

RUN echo "root ALL=(ALL) ALL" >> /etc/sudoers
RUN echo "ci ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
RUN echo "Defaults env_reset" >> /etc/sudoers
RUN echo 'Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go:/usr/local/go/bin"' >> /etc/sudoers

RUN mkdir -p /home/ci/.cache

RUN groupadd -g 999 ci && \
    useradd -r -u 999 -g ci ci && \
    chown -R ci:ci /go /home/ci && \
    chmod -R a+rw /go

USER ci

# -- tests --
RUN make
RUN bash -c 'diff -au <(gofmt -s -d cmd) <(printf "")'
RUN bash -c 'diff -au <(gofmt -s -d pkg) <(printf "")'
RUN make test-race
RUN make crosscompile
RUN make verify

## -- add healing tests
RUN make verify-healing

#-------------------------------------------------------------
# Stage 2: Test Frontend
#-------------------------------------------------------------
FROM node:10.15-stretch-slim

ENV SIMPLE_CI 1

COPY browser /minio/browser
WORKDIR /minio/browser

RUN yarn
RUN yarn test

#-------------------------------------------------------------
# Stage 3: Run Gateway Tests
#-------------------------------------------------------------
FROM ubuntu:16.04

COPY --from=0 /go/src/github.com/minio/minio/minio /usr/bin/minio
COPY buildscripts/gateway-tests.sh /usr/bin/gateway-tests.sh
COPY mint /mint

ENV DEBIAN_FRONTEND noninteractive
ENV LANG C.UTF-8
ENV GOROOT /usr/local/go
ENV GOPATH /usr/local
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH
ENV SIMPLE_CI 1
ENV MINT_ROOT_DIR /mint

RUN apt-get --yes update && apt-get --yes upgrade && \
    apt-get --yes --quiet install wget jq curl git dnsmasq && \
    cd /mint && /mint/release.sh

WORKDIR /mint

RUN /usr/bin/gateway-tests.sh
Makefile (53 changes)

@@ -8,8 +8,6 @@ GOOS := $(shell go env GOOS)
VERSION ?= $(shell git describe --tags)
TAG ?= "minio/minio:$(VERSION)"

BUILD_LDFLAGS := '$(LDFLAGS)'

all: build

checks:
@@ -18,22 +16,19 @@ checks:

getdeps:
	@mkdir -p ${GOPATH}/bin
	@which golint 1>/dev/null || (echo "Installing golint" && GO111MODULE=off go get -u golang.org/x/lint/golint)
ifeq ($(GOARCH),s390x)
	@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && GO111MODULE=off go get honnef.co/go/tools/cmd/staticcheck)
else
	@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && wget --quiet https://github.com/dominikh/go-tools/releases/download/2019.2.3/staticcheck_${GOOS}_${GOARCH}.tar.gz && tar xf staticcheck_${GOOS}_${GOARCH}.tar.gz && mv staticcheck/staticcheck ${GOPATH}/bin/staticcheck && chmod +x ${GOPATH}/bin/staticcheck && rm -f staticcheck_${GOOS}_${GOARCH}.tar.gz && rm -rf staticcheck)
endif
	@which misspell 1>/dev/null || (echo "Installing misspell" && GO111MODULE=off go get -u github.com/client9/misspell/cmd/misspell)
	@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
	@which ruleguard 1>/dev/null || (echo "Installing ruleguard" && GO111MODULE=off go get github.com/quasilyte/go-ruleguard/...)
	@which msgp 1>/dev/null || (echo "Installing msgp" && GO111MODULE=off go get github.com/tinylib/msgp)
	@which stringer 1>/dev/null || (echo "Installing stringer" && GO111MODULE=off go get golang.org/x/tools/cmd/stringer)

crosscompile:
	@(env bash $(PWD)/buildscripts/cross-compile.sh)

verifiers: getdeps vet fmt lint staticcheck spelling
verifiers: getdeps fmt lint ruleguard check-gen

vet:
	@echo "Running $@ check"
	@GO111MODULE=on go vet github.com/minio/minio/...
check-gen:
	@go generate ./... >/dev/null
	@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)

fmt:
	@echo "Running $@ check"
@@ -42,21 +37,12 @@ fmt:

lint:
	@echo "Running $@ check"
	@GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd/...
	@GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg/...
	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml

staticcheck:
ruleguard:
	@echo "Running $@ check"
	@GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/cmd/...
	@GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/pkg/...

spelling:
	@echo "Running $@ check"
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find cmd/`
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find pkg/`
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find docs/`
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find buildscripts/`
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find dockerscripts/`
	@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go github.com/minio/minio/...

# Builds minio, runs the verifiers then runs the tests.
check: test
@@ -71,21 +57,27 @@ test-race: verifiers build
# Verify minio binary
verify:
	@echo "Verifying build with race"
	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
	@GO111MODULE=on CGO_ENABLED=1 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
	@(env bash $(PWD)/buildscripts/verify-build.sh)

# Verify healing of disks with minio binary
verify-healing:
	@echo "Verify healing build with race"
	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
	@(env bash $(PWD)/buildscripts/verify-healing.sh)

# Builds minio locally.
build: checks
	@echo "Building minio binary to './minio'"
	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null

docker: build
hotfix: LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
	sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#'))
hotfix: install

docker: checks
	@echo "Building minio docker image '$(TAG)'"
	@GOOS=linux GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
	@docker build -t $(TAG) . -f Dockerfile.dev

# Builds minio and installs it to $GOPATH/bin.
@@ -101,3 +93,4 @@ clean:
	@rm -rvf minio
	@rm -rvf build
	@rm -rvf release
	@rm -rvf .verify*
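The new `hotfix` target rewrites the latest release tag into an RFC 3339 timestamp before handing it to `gen-ldflags.go`. A quick check of what that `sed` expression does, using the release tag that appears in the Docker labels earlier in this diff:

```sh
echo "RELEASE.2020-11-25T22-36-25Z" | \
    sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#'
# prints: 2020-11-25T22:36:25Z
```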
README.md (81 changes)

@@ -1,28 +1,32 @@
# MinIO Quickstart Guide
[](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/)
[](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/)

[](https://min.io)

MinIO is High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Using MinIO build high performance infrastructure for machine learning, analytics and application data workloads.
MinIO is a High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.

## Docker Container
### Stable
```
docker pull minio/minio
docker run -p 9000:9000 minio/minio server /data
docker run -p 9000:9000 \
  -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
  -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
  minio/minio server /data
```

### Edge
```
docker pull minio/minio:edge
docker run -p 9000:9000 minio/minio:edge server /data
docker run -p 9000:9000 \
  -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
  -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
  minio/minio:edge server /data
```

> NOTE: Docker will not display the default keys unless you start the container with the `-it`(interactive TTY) argument. Generally, it is not recommended to use default keys with containers. Please visit MinIO Docker quickstart guide for more information [here](https://docs.min.io/docs/minio-docker-quickstart-guide)
## macOS
|
||||
### Homebrew (recommended)
|
||||
Install minio packages using [Homebrew](http://brew.sh/)
|
||||
Install minio packages using [Homebrew](https://brew.sh/)
|
||||
```sh
|
||||
brew install minio/stable/minio
|
||||
minio server /data
|
||||
@@ -84,7 +88,7 @@ service minio start
|
||||
```
|
||||
|
||||
## Install from Source
|
||||
Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.13](https://golang.org/dl/#stable)
|
||||
Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.15](https://golang.org/dl/#stable)
|
||||
|
||||
```sh
|
||||
GO111MODULE=on go get github.com/minio/minio
|
||||
@@ -94,23 +98,6 @@ GO111MODULE=on go get github.com/minio/minio
|
||||
|
||||
By default MinIO uses the port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to the port.
|
||||
|
||||
### iptables
|
||||
|
||||
For hosts with iptables enabled (RHEL, CentOS, etc), you can use `iptables` command to enable all traffic coming to specific ports. Use below command to allow
|
||||
access to port 9000
|
||||
|
||||
```sh
|
||||
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
|
||||
service iptables restart
|
||||
```
|
||||
|
||||
Below command enables all incoming traffic to ports ranging from 9000 to 9010.
|
||||
|
||||
```sh
|
||||
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
|
||||
service iptables restart
|
||||
```
|
||||
|
||||
### ufw
|
||||
|
||||
For hosts with ufw enabled (Debian based distros), you can use `ufw` command to allow traffic to specific ports. Use below command to allow access to port 9000
|
||||
@@ -145,8 +132,25 @@ Note that `permanent` makes sure the rules are persistent across firewall start,
|
||||
firewall-cmd --reload
|
||||
```
|
||||
|
||||
### iptables
|
||||
|
||||
For hosts with iptables enabled (RHEL, CentOS, etc), you can use `iptables` command to enable all traffic coming to specific ports. Use below command to allow
|
||||
access to port 9000
|
||||
|
||||
```sh
|
||||
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
|
||||
service iptables restart
|
||||
```
|
||||
|
||||
Below command enables all incoming traffic to ports ranging from 9000 to 9010.
|
||||
|
||||
```sh
|
||||
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
|
||||
service iptables restart
|
||||
```
|
||||
|
||||
## Test using MinIO Browser
|
||||
MinIO Server comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 ensure your server has started successfully.
|
||||
MinIO Server comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 to ensure your server has started successfully.
|
||||
|
||||

|
||||
|
||||
@@ -159,20 +163,23 @@ When deployed on a single drive, MinIO server lets clients access any pre-existi
|
||||
The above statement is also valid for all gateway backends.
|
||||
|
||||
## Upgrading MinIO
|
||||
MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster. This allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend all our users to use [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update) from the client. This will update all the nodes in the cluster and restart them, as shown in the following command from the MinIO client (mc):
|
||||
MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster. This allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend all our users to use [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update) from the client. This will update all the nodes in the cluster simultaneously and restart them, as shown in the following command from the MinIO client (mc):
|
||||
|
||||
```
|
||||
mc admin update <minio alias, e.g., myminio>
|
||||
```
|
||||
|
||||
**Important things to remember during upgrades**:
|
||||
> NOTE: some releases might not allow rolling upgrades, this is always called out in the release notes and it is generally advised to read release notes before upgrading. In such a situation `mc admin update` is the recommended upgrading mechanism to upgrade all servers at once.
|
||||
|
||||
### Important things to remember during MinIO upgrades
|
||||
|
||||
- `mc admin update` will only work if the user running MinIO has write access to the parent directory where the binary is located, for example if the current binary is at `/usr/local/bin/minio`, you would need write access to `/usr/local/bin`.
|
||||
- In the case of federated setups `mc admin update` should be run against each cluster individually. Avoid updating `mc` until all clusters have been updated.
|
||||
- If you are updating the server it is always recommended (unless explicitly mentioned in MinIO server release notes), to update `mc` once all the servers have been upgraded using `mc update`.
|
||||
- `mc admin update` is disabled in docker/container environments, container environments provide their own mechanisms for updating running containers.
|
||||
- If you are using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
|
||||
- If you are using etcd with MinIO for the federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
|
||||
- `mc admin update` updates and restarts all servers simultaneously, applications would retry and continue their respective operations upon upgrade.
|
||||
- `mc admin update` is disabled in kubernetes/container environments, container environments provide their own mechanisms to rollout of updates.
|
||||
- In the case of federated setups `mc admin update` should be run against each cluster individually. Avoid updating `mc` to any new releases until all clusters have been successfully updated.
|
||||
- If using `kes` as KMS with MinIO, just replace the binary and restart `kes` more information about `kes` can be found [here](https://github.com/minio/kes/wiki)
|
||||
- If using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
|
||||
- If using etcd with MinIO for the federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
|
||||
|
||||
## Explore Further
|
||||
- [MinIO Erasure Code QuickStart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
|
||||
@@ -185,11 +192,5 @@ mc admin update <minio alias, e.g., myminio>
|
||||
## Contribute to MinIO Project
|
||||
Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)
|
||||
|
||||
## Caveats
MinIO in its default mode doesn't compute MD5Sum checksums of incoming streams unless the client requests it via the `Content-Md5` header for validation. This may lead to incompatibility with rare S3 clients like `s3ql` which unfortunately do not set `Content-Md5` but depend on the server calculating the hex MD5Sum of the stream. MinIO considers this a bug in `s3ql` that should be fixed on the client side, because MD5Sum is a poor way to checksum and validate the authenticity of objects. Until such client applications are fixed, MinIO provides a workaround: start the server with the `--compat` option.
```sh
./minio --compat server /data
```

## License
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fminio%2Fminio.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fminio%2Fminio?ref=badge_large)
Use of MinIO is governed by the Apache 2.0 License found at [LICENSE](https://github.com/minio/minio/blob/master/LICENSE).

118 README_zh_CN.md
@@ -1,4 +1,4 @@

# MinIO Quickstart Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Go Report Card](https://goreportcard.com/badge/minio/minio)](https://goreportcard.com/report/minio/minio) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)
# MinIO Quickstart Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)

MinIO is an object storage server released under the Apache License v2.0. It is compatible with the Amazon S3 cloud storage API and is well suited for storing large volumes of unstructured data such as photos, videos, log files, backups and container/VM images. An object can be of any size, from a few KB up to 5 TB.

@@ -7,29 +7,33 @@ MinIO is a very lightweight service and can easily be combined with other applications
## Docker Container
### Stable
```
docker pull minio/minio
docker run -p 9000:9000 minio/minio server /data
docker run -p 9000:9000 \
  -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
  -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
  minio/minio server /data
```

### Edge
```
docker pull minio/minio:edge
docker run -p 9000:9000 minio/minio:edge server /data
docker run -p 9000:9000 \
  -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
  -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
  minio/minio:edge server /data
```
For more Docker deployment information visit [here](https://docs.min.io/docs/minio-docker-quickstart-guide)

> NOTE: Docker will not display the default keys unless you start the container with the `-it` (interactive TTY) argument. Generally, it is not recommended to use default keys with containers. For more Docker deployment information visit [here](https://docs.min.io/docs/minio-docker-quickstart-guide)

## macOS
### Homebrew
### Homebrew (recommended)
Install minio using [Homebrew](http://brew.sh/)

```sh
brew install minio/stable/minio
minio server /data
```
#### Note
If you previously installed minio using `brew install minio`, reinstall it from the official `minio/stable/minio` tap. The homebrew version is unstable due to a bug in golang 1.8.

```
> NOTE: If you previously installed minio using `brew install minio`, reinstall it from the official `minio/stable/minio` tap. The homebrew version is unstable due to a bug in golang 1.8.
```sh
brew uninstall minio
brew install minio/stable/minio
```
@@ -49,6 +53,16 @@ chmod 755 minio
| ---------- | -------- | ------ |
| GNU/Linux | 64-bit Intel | https://dl.min.io/server/minio/release/linux-amd64/minio |
```sh
wget https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
./minio server /data
```

| Operating System | CPU Architecture | URL |
| ---------- | -------- | ------ |
| GNU/Linux | ppc64le | https://dl.min.io/server/minio/release/linux-ppc64le/minio |
```sh
wget https://dl.min.io/server/minio/release/linux-ppc64le/minio
chmod +x minio
./minio server /data
```
@@ -64,7 +78,7 @@ minio.exe server D:\Photos

## FreeBSD
### Port
Install using [pkg](https://github.com/freebsd/pkg).
Install using [pkg](https://github.com/freebsd/pkg). MinIO does not officially build FreeBSD binaries; they are maintained by FreeBSD upstream, see [here](https://www.freshports.org/www/minio).

```sh
pkg install minio
@@ -75,14 +89,68 @@ service minio start

## Install from Source

Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install).
Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum Golang version required is [go1.14](https://golang.org/dl/#stable)

```sh
go get -u github.com/minio/minio
GO111MODULE=on go get github.com/minio/minio
```

## Allow port access for Firewalls

By default MinIO uses port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to the port.

### ufw

For hosts with ufw enabled (Debian based distros), you can use the `ufw` command to allow traffic to specific ports. Use the command below to allow access to port 9000

```sh
ufw allow 9000
```

The command below allows all incoming traffic to ports ranging from 9000 to 9010.

```sh
ufw allow 9000:9010/tcp
```

### firewall-cmd

For hosts with firewall-cmd enabled (CentOS), you can use the `firewall-cmd` command to allow traffic to specific ports. Use the command below to allow access to port 9000

```sh
firewall-cmd --get-active-zones
```

This command gets the currently active zone(s). Now, apply the port rule to the zone(s) returned above. For example if the returned zone is `public`, use

```sh
firewall-cmd --zone=public --add-port=9000/tcp --permanent
```

Note that `permanent` makes the rule persistent across firewall start, restart and reload. Finally, reload the firewall for the change to take effect.

```sh
firewall-cmd --reload
```

### iptables

For hosts with iptables enabled (RHEL, CentOS, etc), you can use the `iptables` command to allow traffic to specific ports. Use the command below to allow access to port 9000

```sh
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
service iptables restart
```

The command below allows all incoming traffic to ports ranging from 9000 to 9010.

```sh
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
service iptables restart
```

## Test using MinIO Browser
After installation, point your web browser to [http://127.0.0.1:9000](http://127.0.0.1:9000); if it is reachable, minio has been installed successfully.
MinIO Server comes with an embedded web based object browser. After installation, point your web browser to [http://127.0.0.1:9000](http://127.0.0.1:9000); if it is reachable, minio has been installed successfully.

![Screenshot](https://github.com/minio/minio/blob/master/docs/screenshots/minio-browser.png?raw=true)

@@ -94,6 +162,25 @@ go get -u github.com/minio/minio

The above statement is also valid for all gateway backends.

## Upgrading MinIO
MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster. This allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend all users to use the [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update) command from the client. This will update all the nodes in the cluster simultaneously and restart them, as shown below:

```
mc admin update <minio alias, e.g., myminio>
```

> NOTE: some releases might not allow rolling upgrades; this is usually called out in the release notes, so it is advised to read them before upgrading. In such cases `mc admin update` is the recommended mechanism to upgrade all servers at once.

### Important things to remember during MinIO upgrades

- `mc admin update` will only work if the user running MinIO has write access to the parent directory where the binary is located, e.g. if the current binary is at `/usr/local/bin/minio`, you need write access to `/usr/local/bin`.
- `mc admin update` updates and restarts all servers simultaneously; applications will retry and continue their respective operations after the upgrade.
- `mc admin update` is disabled in kubernetes/container environments; container environments provide their own mechanisms for rolling out updates.
- In the case of federated setups, run `mc admin update` against each cluster individually. Do not update `mc` to any new release until all clusters have been successfully updated.
- If using `kes` as KMS with MinIO, just replace the binary and restart `kes`; more information about `kes` can be found [here](https://github.com/minio/kes/wiki).
- If using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
- If using etcd with MinIO for the federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md

## Explore Further
- [MinIO Erasure Code QuickStart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
- [`mc` Quickstart Guide](https://docs.min.io/docs/minio-client-quickstart-guide)
@@ -104,3 +191,6 @@ go get -u github.com/minio/minio

## Contribute to MinIO Project
Please follow the MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md). Chinese developers are welcome to join the MinIO project.

## License
Use of MinIO is governed by the Apache 2.0 License; see [LICENSE](./LICENSE).

39 VULNERABILITY_REPORT.md (new file)
@@ -0,0 +1,39 @@

## Vulnerability Management Policy

This document formally describes the process of addressing and managing a
reported vulnerability that has been found in the MinIO server code base,
any directly connected ecosystem component or a direct / indirect dependency
of the code base.

### Scope

The vulnerability management policy described in this document covers the
process of investigating, assessing and resolving a vulnerability report
opened by a MinIO employee or an external third party.

Therefore, it lists pre-conditions and actions that should be performed to
resolve and fix a reported vulnerability.

### Vulnerability Management Process

The vulnerability management process requires that the vulnerability report
contains the following information:

- The project / component that contains the reported vulnerability.
- A description of the vulnerability. In particular, the type of the
  reported vulnerability and how it might be exploited. Alternatively,
  a well-established vulnerability identifier, e.g. CVE number, can be
  used instead.

Based on the description mentioned above, a MinIO engineer or security team
member investigates:

- Whether the reported vulnerability exists.
- The conditions that are required such that the vulnerability can be exploited.
- The steps required to fix the vulnerability.

In general, if the vulnerability exists in one of the MinIO code bases
itself - not in a code dependency - then MinIO will, if possible, fix
the vulnerability or implement reasonable countermeasures such that the
vulnerability cannot be exploited anymore.

@@ -2,6 +2,7 @@

``MinIO Browser`` provides a minimal set of UI to manage buckets and objects on a ``minio`` server. ``MinIO Browser`` is written in javascript and released under [Apache 2.0 License](./LICENSE).


## Installation

### Install node
@@ -11,6 +12,11 @@ exec -l $SHELL
nvm install stable
```

### Install node dependencies
```sh
npm install
```

### Install `go-bindata` and `go-bindata-assetfs`

If you do not have a working Golang environment, please follow [Install Golang](https://golang.org/doc/install)
@@ -30,13 +36,16 @@ npm run release

This generates ui-assets.go in the current directory. Now do `make` in the parent directory to build the minio binary with the newly generated ``ui-assets.go``


## Run MinIO Browser with live reload

### Run MinIO Browser with live reload

```sh
npm run dev
```

Open [http://localhost:8080/minio/](http://localhost:8080/minio/) in your browser to play with the application
Open [http://localhost:8080/minio/](http://localhost:8080/minio/) in your browser to play with the application.

### Run MinIO Browser with live reload on custom port

@@ -54,7 +63,7 @@ index 3ccdaba..9496c56 100644
+ port: 8888,
proxy: {
'/minio/webrpc': {
target: 'http://localhost:9000',
target: 'http://localhost:9000',
@@ -97,7 +98,7 @@ var exports = {
if (process.env.NODE_ENV === 'dev') {
exports.entry = [
@@ -70,4 +79,102 @@ index 3ccdaba..9496c56 100644
npm run dev
```

Open [http://localhost:8888/minio/](http://localhost:8888/minio/) in your browser to play with the application
Open [http://localhost:8888/minio/](http://localhost:8888/minio/) in your browser to play with the application.

### Run MinIO Browser with live reload on any IP

Edit `browser/webpack.config.js`

```diff
diff --git a/browser/webpack.config.js b/browser/webpack.config.js
index 8bdbba53..139f6049 100644
--- a/browser/webpack.config.js
+++ b/browser/webpack.config.js
@@ -71,6 +71,7 @@ var exports = {
    historyApiFallback: {
      index: '/minio/'
    },
+   host: '0.0.0.0',
    proxy: {
      '/minio/webrpc': {
        target: 'http://localhost:9000',
```

```sh
npm run dev
```

Open [http://IP:8080/minio/](http://IP:8080/minio/) in your browser to play with the application.


## Run tests

    npm run test


## Docker development environment

This approach will download the sources on your machine such that you are able to use your IDE or editor of choice.
A Docker container will be used in order to provide a controlled build environment without messing with your host system.

### Prepare host system

Install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) and [Docker](https://docs.docker.com/get-docker/).

### Development within container

Prepare and build container
```
git clone git@github.com:minio/minio.git
cd minio
docker build -t minio-dev -f Dockerfile.dev.browser .
```

Run container, build and run core
```sh
docker run -it --rm --name minio-dev -v "$PWD":/minio minio-dev

cd /minio/browser
npm install
npm run release
cd /minio
make
./minio server /data
```
Note `Endpoint` IP (the one which is _not_ `127.0.0.1`), `AccessKey` and `SecretKey` (both default to `minioadmin`) in order to enter them in the browser later.


Open another terminal.
Connect to container
```sh
docker exec -it minio-dev bash
```

Apply patch to allow access from outside container
```sh
cd /minio
git apply --ignore-whitespace <<EOF
diff --git a/browser/webpack.config.js b/browser/webpack.config.js
index 8bdbba53..139f6049 100644
--- a/browser/webpack.config.js
+++ b/browser/webpack.config.js
@@ -71,6 +71,7 @@ var exports = {
    historyApiFallback: {
      index: '/minio/'
    },
+   host: '0.0.0.0',
    proxy: {
      '/minio/webrpc': {
        target: 'http://localhost:9000',
EOF
```

Build and run frontend with auto-reload
```sh
cd /minio/browser
npm install
npm run dev
```

Open [http://IP:8080/minio/](http://IP:8080/minio/) in your browser to play with the application.

@@ -57,22 +57,6 @@ export class BrowserDropdown extends React.Component {
    const { fetchServerInfo } = this.props
    fetchServerInfo()
  }
  fullScreen(e) {
    e.preventDefault()
    let el = document.documentElement
    if (el.requestFullscreen) {
      el.requestFullscreen()
    }
    if (el.mozRequestFullScreen) {
      el.mozRequestFullScreen()
    }
    if (el.webkitRequestFullscreen) {
      el.webkitRequestFullscreen()
    }
    if (el.msRequestFullscreen) {
      el.msRequestFullscreen()
    }
  }
  logout(e) {
    e.preventDefault()
    web.Logout()
@@ -87,24 +71,30 @@ export class BrowserDropdown extends React.Component {
          <i className="fas fa-bars" />
        </Dropdown.Toggle>
        <Dropdown.Menu className="dropdown-menu-right">
          <li>
            <a href="" onClick={this.showChangePassword.bind(this)}>
              Change Password <i className="fas fa-cog" />
            </a>
            {this.state.showChangePasswordModal && (
              <ChangePasswordModal
                serverInfo={serverInfo}
                hideChangePassword={this.hideChangePassword.bind(this)}
              />
            )}
          </li>
          <li>
            <a target="_blank" href="https://docs.min.io/?ref=ob">
              Documentation <i className="fas fa-book" />
            </a>
          </li>
          <li>
            <a target="_blank" href="https://github.com/minio/minio">
              GitHub <i className="fab fa-github" />
            </a>
          </li>
          <li>
            <a href="" onClick={this.fullScreen}>
              Fullscreen <i className="fas fa-expand" />
            </a>
          </li>
          <li>
            <a target="_blank" href="https://docs.min.io/">
              Documentation <i className="fas fa-book" />
            </a>
          </li>
          <li>
            <a target="_blank" href="https://slack.min.io">
              Ask for help <i className="fas fa-question-circle" />
            <a target="_blank" href="https://min.io/pricing?ref=ob">
              Get Support <i className="fas fa-question-circle" />
            </a>
          </li>
          <li>
@@ -118,20 +108,9 @@ export class BrowserDropdown extends React.Component {
              />
            )}
          </li>
          <li>
            <a href="" onClick={this.showChangePassword.bind(this)}>
              Change Password <i className="fas fa-cog" />
            </a>
            {this.state.showChangePasswordModal && (
              <ChangePasswordModal
                serverInfo={serverInfo}
                hideChangePassword={this.hideChangePassword.bind(this)}
              />
            )}
          </li>
          <li>
            <a href="" id="logout" onClick={this.logout}>
              Sign Out <i className="fas fa-sign-out-alt" />
              Logout <i className="fas fa-sign-out-alt" />
            </a>
          </li>
        </Dropdown.Menu>

@@ -88,11 +88,6 @@ export class ChangePasswordModal extends React.Component {

  canChangePassword() {
    const { serverInfo } = this.props
    // Password change is not allowed in WORM mode
    if (serverInfo.info.isWorm) {
      return false
    }

    // Password change is not allowed for temporary users(STS)
    if(serverInfo.userInfo.isTempUser) {
      return false
@@ -15,6 +15,7 @@
 */

import React from "react"
import ObjectsSearch from "../objects/ObjectsSearch"
import Path from "../objects/Path"
import StorageInfo from "./StorageInfo"
import BrowserDropdown from "./BrowserDropdown"
@@ -27,6 +28,7 @@ export const Header = () => {
    <header className="fe-header">
      <Path />
      {loggedIn && <StorageInfo />}
      {loggedIn && <ObjectsSearch />}
      <ul className="feh-actions">
        {loggedIn ? (
          <BrowserDropdown />
@@ -26,13 +26,10 @@ export class StorageInfo extends React.Component {
  }
  render() {
    const { used } = this.props.storageInfo

    if (!used) {
    if (!used || used == 0) {
      return <noscript />
    }

    const totalUsed = used.reduce((v1, v2) => v1 + v2, 0)

    return (
      <div className="feh-used">
        <div className="fehu-chart">
@@ -41,7 +38,7 @@ export class StorageInfo extends React.Component {
          <ul>
            <li>
              <span>Used: </span>
              {humanize.filesize(totalUsed)}
              {humanize.filesize(used)}
            </li>
          </ul>
        </div>

@@ -64,17 +64,6 @@ describe("ChangePasswordModal", () => {
    shallow(<ChangePasswordModal serverInfo={serverInfo} />)
  })

  it("should not allow changing password when isWorm is true", () => {
    const newServerInfo = { ...serverInfo, info: { isWorm: true } }
    const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
    expect(
      wrapper
        .find("ModalBody")
        .childAt(0)
        .text()
    ).toBe("Credentials of this user cannot be updated through MinIO Browser.")
  })

  it("should not allow changing password when not IAM user", () => {
    const newServerInfo = {
      ...serverInfo,
@@ -21,7 +21,7 @@ import { StorageInfo } from "../StorageInfo"
describe("StorageInfo", () => {
  it("should render without crashing", () => {
    shallow(
      <StorageInfo storageInfo={{ used: [60] }} fetchStorageInfo={jest.fn()} />
      <StorageInfo storageInfo={ {used: 60} } fetchStorageInfo={jest.fn()} />
    )
  })

@@ -29,7 +29,7 @@ describe("StorageInfo", () => {
    const fetchStorageInfo = jest.fn()
    shallow(
      <StorageInfo
        storageInfo={{ used: [60] }}
        storageInfo={ {used: 60} }
        fetchStorageInfo={fetchStorageInfo}
      />
    )
@@ -40,7 +40,7 @@ describe("StorageInfo", () => {
    const fetchStorageInfo = jest.fn()
    const wrapper = shallow(
      <StorageInfo
        storageInfo={{ used: null }}
        storageInfo={ {used: 0} }
        fetchStorageInfo={fetchStorageInfo}
      />
    )
@@ -20,7 +20,9 @@ import * as actionsCommon from "../actions"

jest.mock("../../web", () => ({
  StorageInfo: jest.fn(() => {
    return Promise.resolve({ storageInfo: { Used: [60] } })
    return Promise.resolve({
      used: 60
    })
  }),
  ServerInfo: jest.fn(() => {
    return Promise.resolve({
@@ -39,7 +41,7 @@ describe("Common actions", () => {
  it("creates common/SET_STORAGE_INFO after fetching the storage details ", () => {
    const store = mockStore()
    const expectedActions = [
      { type: "common/SET_STORAGE_INFO", storageInfo: { used: [60] } }
      { type: "common/SET_STORAGE_INFO", storageInfo: { used: 60 } }
    ]
    return store.dispatch(actionsCommon.fetchStorageInfo()).then(() => {
      const actions = store.getActions()
@@ -21,11 +21,7 @@ describe("common reducer", () => {
  it("should return the initial state", () => {
    expect(reducer(undefined, {})).toEqual({
      sidebarOpen: false,
      storageInfo: {
        total: [0],
        free: [0],
        used: [0]
      },
      storageInfo: {used: 0},
      serverInfo: {}
    })
  })
@@ -62,11 +58,11 @@ describe("common reducer", () => {
        {},
        {
          type: actionsCommon.SET_STORAGE_INFO,
          storageInfo: { total: [100], free: [40] }
          storageInfo: { }
        }
      )
    ).toEqual({
      storageInfo: { total: [100], free: [40] }
      storageInfo: { }
    })
  })

@@ -33,8 +33,7 @@ export const fetchStorageInfo = () => {
  return function(dispatch) {
    return web.StorageInfo().then(res => {
      const storageInfo = {
        total: res.storageInfo.Total,
        used: res.storageInfo.Used
        used: res.used
      }
      dispatch(setStorageInfo(storageInfo))
    })
@@ -19,7 +19,7 @@ import * as actionsCommon from "./actions"
export default (
  state = {
    sidebarOpen: false,
    storageInfo: { total: [0], free: [0], used: [0] },
    storageInfo: {used: 0},
    serverInfo: {}
  },
  action
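
Taken together, the hunks above change the storage-info shape from per-disk arrays (`{ total: [...], free: [...], used: [...] }`) to a single aggregated `{ used: <bytes> }`. A minimal model of the resulting reducer behavior (an illustrative sketch, not the actual file; only the action type is taken from the diff):

```js
// Sketch: how the common reducer behaves after this change (assumptions noted).
const SET_STORAGE_INFO = "common/SET_STORAGE_INFO"

const initialState = {
  sidebarOpen: false,
  storageInfo: { used: 0 }, // one aggregated byte count instead of arrays
  serverInfo: {},
}

function reducer(state = initialState, action) {
  switch (action.type) {
    case SET_STORAGE_INFO:
      // The tests above show storageInfo being replaced wholesale.
      return { ...state, storageInfo: action.storageInfo }
    default:
      return state
  }
}

console.log(reducer(undefined, {}).storageInfo) // { used: 0 }
console.log(
  reducer(undefined, { type: SET_STORAGE_INFO, storageInfo: { used: 60 } })
    .storageInfo
) // { used: 60 }
```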
24 browser/app/js/browser/selectors.js (new file)
@@ -0,0 +1,24 @@
/*
 * MinIO Cloud Storage (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import { createSelector } from "reselect"

export const getServerInfo = state => state.browser.serverInfo

export const hasServerPublicDomain = createSelector(
  getServerInfo,
  serverInfo => Boolean(serverInfo.info && serverInfo.info.domains && serverInfo.info.domains.length),
)
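
`hasServerPublicDomain` memoizes a derived boolean via reselect, so consumers recompute only when `state.browser.serverInfo` changes. A hypothetical consumer, for illustration (the component below is assumed, not part of this changeset):

```js
// Hypothetical usage of the new selector (illustration only).
import React from "react"
import { connect } from "react-redux"
import { hasServerPublicDomain } from "./selectors"

// Renders a hint only when the server reports at least one public domain.
export const PublicDomainHint = ({ hasPublicDomain }) =>
  hasPublicDomain ? <span>Server has a public domain configured</span> : null

const mapStateToProps = state => ({
  hasPublicDomain: hasServerPublicDomain(state),
})

export default connect(mapStateToProps)(PublicDomainHint)
```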
@@ -31,19 +31,38 @@ export const SET_POLICIES = "buckets/SET_POLICIES"

export const fetchBuckets = () => {
  return function(dispatch) {
    const { bucket, prefix } = pathSlice(history.location.pathname)
    return web.ListBuckets().then(res => {
      const buckets = res.buckets ? res.buckets.map(bucket => bucket.name) : []
      dispatch(setList(buckets))
      if (buckets.length > 0) {
        const { bucket, prefix } = pathSlice(history.location.pathname)
        dispatch(setList(buckets))
        if (bucket && buckets.indexOf(bucket) > -1) {
          dispatch(selectBucket(bucket, prefix))
        } else {
          dispatch(selectBucket(buckets[0]))
        }
      } else {
        dispatch(selectBucket(""))
        history.replace("/")
        if (bucket) {
          dispatch(setList([bucket]))
          dispatch(selectBucket(bucket, prefix))
        } else {
          dispatch(selectBucket(""))
          history.replace("/")
        }
      }
    })
    .catch(err => {
      if (bucket && err.message === "Access Denied." || err.message.indexOf('Prefix access is denied') > -1 ) {
        dispatch(setList([bucket]))
        dispatch(selectBucket(bucket, prefix))
      } else {
        dispatch(
          alertActions.set({
            type: "danger",
            message: err.message,
            autoClear: true,
          })
        )
      }
    })
  }

@@ -22,7 +22,8 @@ const bucketsFilterSelector = state => state.buckets.filter
export const getFilteredBuckets = createSelector(
  bucketsSelector,
  bucketsFilterSelector,
  (buckets, filter) => buckets.filter(bucket => bucket.indexOf(filter) > -1)
  (buckets, filter) => buckets.filter(
    bucket => bucket.toLowerCase().indexOf(filter.toLowerCase()) > -1)
)

export const getCurrentBucket = state => state.buckets.currentBucket

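
The selector change above makes bucket filtering case-insensitive by lowercasing both the bucket name and the filter before the substring match. A quick standalone illustration of the difference:

```js
// Standalone illustration of the new case-insensitive bucket filter.
const buckets = ["Photos", "backups", "LOGS"]
const filter = "pho"

// Old behavior: case-sensitive indexOf finds nothing for "pho" in "Photos".
const oldResult = buckets.filter(bucket => bucket.indexOf(filter) > -1)

// New behavior: lowercase both sides first.
const newResult = buckets.filter(
  bucket => bucket.toLowerCase().indexOf(filter.toLowerCase()) > -1
)

console.log(oldResult) // []
console.log(newResult) // ["Photos"]
```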
@@ -14,30 +14,67 @@
 * limitations under the License.
 */

import mimedb from 'mime-types'
import mimedb from "mime-types"

const isFolder = (name, contentType) => {
  if (name.endsWith('/')) return true
  if (name.endsWith("/")) return true
  return false
}

const isPdf = (name, contentType) => {
  if (contentType === 'application/pdf') return true
  if (contentType === "application/pdf") return true
  return false
}
const isImage = (name, contentType) => {
  if (
    contentType === "image/jpeg" ||
    contentType === "image/gif" ||
    contentType === "image/x-icon" ||
    contentType === "image/png" ||
    contentType === "image/svg+xml" ||
    contentType === "image/tiff" ||
    contentType === "image/webp"
  )
    return true
  return false
}

const isZip = (name, contentType) => {
  if (!contentType || !contentType.includes('/')) return false
  if (contentType.split('/')[1].includes('zip')) return true
  if (!contentType || !contentType.includes("/")) return false
  if (contentType.split("/")[1].includes("zip")) return true
  return false
}

const isCode = (name, contentType) => {
  const codeExt = ['c', 'cpp', 'go', 'py', 'java', 'rb', 'js', 'pl', 'fs',
    'php', 'css', 'less', 'scss', 'coffee', 'net', 'html',
    'rs', 'exs', 'scala', 'hs', 'clj', 'el', 'scm', 'lisp',
    'asp', 'aspx']
  const ext = name.split('.').reverse()[0]
  const codeExt = [
    "c",
    "cpp",
    "go",
    "py",
    "java",
    "rb",
    "js",
    "pl",
    "fs",
    "php",
    "css",
    "less",
    "scss",
    "coffee",
    "net",
    "html",
    "rs",
    "exs",
    "scala",
    "hs",
    "clj",
    "el",
    "scm",
    "lisp",
    "asp",
    "aspx",
  ]
  const ext = name.split(".").reverse()[0]
  for (var i in codeExt) {
    if (ext === codeExt[i]) return true
  }
@@ -45,9 +82,9 @@ const isCode = (name, contentType) => {
}

const isExcel = (name, contentType) => {
  if (!contentType || !contentType.includes('/')) return false
  const types = ['excel', 'spreadsheet']
  const subType = contentType.split('/')[1]
  if (!contentType || !contentType.includes("/")) return false
  const types = ["excel", "spreadsheet"]
  const subType = contentType.split("/")[1]
  for (var i in types) {
    if (subType.includes(types[i])) return true
  }
@@ -55,9 +92,9 @@ const isExcel = (name, contentType) => {
}

const isDoc = (name, contentType) => {
  if (!contentType || !contentType.includes('/')) return false
  const types = ['word', '.document']
  const subType = contentType.split('/')[1]
  if (!contentType || !contentType.includes("/")) return false
  const types = ["word", ".document"]
  const subType = contentType.split("/")[1]
  for (var i in types) {
    if (subType.includes(types[i])) return true
  }
@@ -65,9 +102,9 @@ const isDoc = (name, contentType) => {
}

const isPresentation = (name, contentType) => {
  if (!contentType || !contentType.includes('/')) return false
  var types = ['powerpoint', 'presentation']
  const subType = contentType.split('/')[1]
  if (!contentType || !contentType.includes("/")) return false
  var types = ["powerpoint", "presentation"]
  const subType = contentType.split("/")[1]
  for (var i in types) {
    if (subType.includes(types[i])) return true
  }
@@ -76,31 +113,32 @@ const isPresentation = (name, contentType) => {

const typeToIcon = (type) => {
  return (name, contentType) => {
    if (!contentType || !contentType.includes('/')) return false
    if (contentType.split('/')[0] === type) return true
    if (!contentType || !contentType.includes("/")) return false
    if (contentType.split("/")[0] === type) return true
    return false
  }
}

export const getDataType = (name, contentType) => {
  if (contentType === "") {
    contentType = mimedb.lookup(name) || 'application/octet-stream'
    contentType = mimedb.lookup(name) || "application/octet-stream"
  }
  const check = [
    ['folder', isFolder],
    ['code', isCode],
    ['audio', typeToIcon('audio')],
    ['image', typeToIcon('image')],
    ['video', typeToIcon('video')],
    ['text', typeToIcon('text')],
    ['pdf', isPdf],
    ['zip', isZip],
    ['excel', isExcel],
    ['doc', isDoc],
    ['presentation', isPresentation]
    ["folder", isFolder],
    ["code", isCode],
    ["audio", typeToIcon("audio")],
    ["image", typeToIcon("image")],
    ["video", typeToIcon("video")],
    ["text", typeToIcon("text")],
    ["pdf", isPdf],
    ["image", isImage],
    ["zip", isZip],
    ["excel", isExcel],
    ["doc", isDoc],
    ["presentation", isPresentation],
  ]
  for (var i in check) {
    if (check[i][1](name, contentType)) return check[i][0]
  }
  return 'other'
  return "other"
}

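
`getDataType` walks the `check` table in order and returns the first label whose predicate matches, falling back to a `mime-types` lookup by file name when the stored content type is empty. A few illustrative calls (a sketch, assuming the module above is imported as `mime.js`):

```js
// Illustrative calls against getDataType from mime.js (not part of the diff).
import { getDataType } from "./mime.js"

console.log(getDataType("photo.jpg", ""))                 // "image" — mime-types resolves .jpg
console.log(getDataType("report.pdf", "application/pdf")) // "pdf"
console.log(getDataType("archive/", ""))                  // "folder" — trailing slash matches first
console.log(getDataType("data.bin", ""))                  // "other" — octet-stream matches nothing
```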
@@ -19,18 +19,22 @@ import { connect } from "react-redux"
import { Dropdown } from "react-bootstrap"
import ShareObjectModal from "./ShareObjectModal"
import DeleteObjectConfirmModal from "./DeleteObjectConfirmModal"
import PreviewObjectModal from "./PreviewObjectModal"

import * as objectsActions from "./actions"
import { getDataType } from "../mime.js"
import {
  SHARE_OBJECT_EXPIRY_DAYS,
  SHARE_OBJECT_EXPIRY_HOURS,
  SHARE_OBJECT_EXPIRY_MINUTES
  SHARE_OBJECT_EXPIRY_MINUTES,
} from "../constants"

export class ObjectActions extends React.Component {
  constructor(props) {
    super(props)
    this.state = {
      showDeleteConfirmation: false
      showDeleteConfirmation: false,
      showPreview: false,
    }
  }
  shareObject(e) {
@@ -43,6 +47,11 @@ export class ObjectActions extends React.Component {
      SHARE_OBJECT_EXPIRY_MINUTES
    )
  }
  handleDownload(e) {
    e.preventDefault()
    const { object, downloadObject } = this.props
    downloadObject(object.name)
  }
  deleteObject() {
    const { object, deleteObject } = this.props
    deleteObject(object.name)
@@ -53,7 +62,20 @@ export class ObjectActions extends React.Component {
  }
  hideDeleteConfirmModal() {
    this.setState({
      showDeleteConfirmation: false
      showDeleteConfirmation: false,
    })
  }
  getObjectURL(objectname, callback) {
    const { getObjectURL } = this.props
    getObjectURL(objectname, callback)
  }
  showPreviewModal(e) {
    e.preventDefault()
    this.setState({ showPreview: true })
  }
  hidePreviewModal() {
    this.setState({
      showPreview: false,
    })
  }
  render() {
@@ -65,26 +87,54 @@ export class ObjectActions extends React.Component {
        <a
          href=""
          className="fiad-action"
          title="Share"
          onClick={this.shareObject.bind(this)}
        >
          <i className="fas fa-share-alt" />
        </a>
        {getDataType(object.name, object.contentType) == "image" && (
          <a
            href=""
            className="fiad-action"
            title="Preview"
            onClick={this.showPreviewModal.bind(this)}
          >
            <i className="far fa-file-image" />
          </a>
        )}
        <a
          href=""
          className="fiad-action"
          title="Download"
          onClick={this.handleDownload.bind(this)}
        >
          <i className="fas fa-cloud-download-alt" />
        </a>
        <a
          href=""
          className="fiad-action"
          title="Delete"
          onClick={this.showDeleteConfirmModal.bind(this)}
        >
          <i className="fas fa-trash-alt" />
        </a>
      </Dropdown.Menu>
      {(showShareObjectModal && shareObjectName === object.name) &&
        <ShareObjectModal object={object} />}
      {showShareObjectModal && shareObjectName === object.name && (
        <ShareObjectModal object={object} />
      )}
      {this.state.showDeleteConfirmation && (
        <DeleteObjectConfirmModal
          deleteObject={this.deleteObject.bind(this)}
          hideDeleteConfirmModal={this.hideDeleteConfirmModal.bind(this)}
        />
      )}
      {this.state.showPreview && (
        <PreviewObjectModal
          object={object}
          hidePreviewModal={this.hidePreviewModal.bind(this)}
          getObjectURL={this.getObjectURL.bind(this)}
        />
      )}
    </Dropdown>
  )
}
@@ -94,15 +144,18 @@ const mapStateToProps = (state, ownProps) => {
  return {
    object: ownProps.object,
    showShareObjectModal: state.objects.shareObject.show,
    shareObjectName: state.objects.shareObject.object
    shareObjectName: state.objects.shareObject.object,
  }
}

const mapDispatchToProps = dispatch => {
const mapDispatchToProps = (dispatch) => {
  return {
    downloadObject: object => dispatch(objectsActions.downloadObject(object)),
    shareObject: (object, days, hours, minutes) =>
      dispatch(objectsActions.shareObject(object, days, hours, minutes)),
    deleteObject: object => dispatch(objectsActions.deleteObject(object))
    deleteObject: (object) => dispatch(objectsActions.deleteObject(object)),
    getObjectURL: (object, callback) =>
      dispatch(objectsActions.getObjectURL(object, callback)),
  }
}

@@ -54,8 +54,11 @@ export const ObjectItem = ({
      href={getDataType(name, contentType) === "folder" ? name : "#"}
      onClick={e => {
        e.preventDefault()
        // onclick function is passed only when we have a prefix
        if (onClick) {
          onClick()
        } else {
          checked ? uncheckObject(name) : checkObject(name)
        }
      }}
    >
@@ -18,6 +18,7 @@ import React from "react"
import { connect } from "react-redux"
import InfiniteScroll from "react-infinite-scroller"
import ObjectsList from "./ObjectsList"
import { getFilteredObjects } from "./selectors"

export class ObjectsListContainer extends React.Component {
  constructor(props) {
@@ -39,22 +40,29 @@ export class ObjectsListContainer extends React.Component {
      })
    }
  }
  componentDidUpdate(prevProps) {
    if (this.props.filter !== prevProps.filter) {
      this.setState({
        page: 1
      })
    }
  }
  loadNextPage() {
    this.setState(state => {
      return { page: state.page + 1 }
    })
  }
  render() {
    const { objects, listLoading } = this.props
    const { filteredObjects, listLoading } = this.props

    const visibleObjects = objects.slice(0, this.state.page * 100)
    const visibleObjects = filteredObjects.slice(0, this.state.page * 100)

    return (
      <div style={{ position: "relative" }}>
        <InfiniteScroll
          pageStart={0}
          loadMore={this.loadNextPage}
          hasMore={objects.length > visibleObjects.length}
          hasMore={filteredObjects.length > visibleObjects.length}
          useWindow={true}
          initialLoad={false}
        >
@@ -70,7 +78,8 @@ const mapStateToProps = state => {
  return {
    currentBucket: state.buckets.currentBucket,
    currentPrefix: state.objects.currentPrefix,
    objects: state.objects.list,
    filteredObjects: getFilteredObjects(state),
    filter: state.objects.filter,
    sortBy: state.objects.sortBy,
    sortOrder: state.objects.sortOrder,
    listLoading: state.objects.listLoading

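
The container pages the object list in windows of 100: `visibleObjects` slices the filtered list, `InfiniteScroll` calls `loadNextPage` to grow the window, and a filter change resets the page to 1. A stripped-down model of that paging logic (illustrative sketch only):

```js
// Minimal model of the paging logic used by ObjectsListContainer (sketch).
const PAGE_SIZE = 100

const visibleSlice = (filteredObjects, page) =>
  filteredObjects.slice(0, page * PAGE_SIZE)

const objects = Array.from({ length: 250 }, (_, i) => ({ name: `obj-${i}` }))

let page = 1
console.log(visibleSlice(objects, page).length) // 100
page += 1 // what loadNextPage does on scroll
console.log(visibleSlice(objects, page).length) // 200
// hasMore stays true while more filtered objects remain unrendered.
console.log(objects.length > visibleSlice(objects, page).length) // true
```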
43 browser/app/js/objects/ObjectsSearch.js (new file)
@@ -0,0 +1,43 @@
/*
 * MinIO Cloud Storage (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import React from "react"
import { connect } from "react-redux"
import * as actionsObjects from "./actions"

export const ObjectsSearch = ({ onChange }) => (
  <div
    className="input-group ig-left ig-search-dark"
    style={{ display: "block" }}
  >
    <input
      className="ig-text"
      type="input"
      placeholder="Search Objects..."
      onChange={e => onChange(e.target.value)}
    />
    <i className="ig-helpers" />
  </div>
)

const mapDispatchToProps = dispatch => {
  return {
    onChange: filter =>
      dispatch(actionsObjects.setFilter(filter))
  }
}

export default connect(undefined, mapDispatchToProps)(ObjectsSearch)
95 browser/app/js/objects/PrefixActions.js (new file)
@@ -0,0 +1,95 @@
/*
 * MinIO Cloud Storage (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import React from "react"
import { connect } from "react-redux"
import { Dropdown } from "react-bootstrap"
import DeleteObjectConfirmModal from "./DeleteObjectConfirmModal"
import * as actions from "./actions"

export class PrefixActions extends React.Component {
  constructor(props) {
    super(props)
    this.state = {
      showDeleteConfirmation: false,
    }
  }
  handleDownload(e) {
    e.preventDefault()
    const { object, downloadPrefix } = this.props
    downloadPrefix(object.name)
  }
  deleteObject() {
    const { object, deleteObject } = this.props
    deleteObject(object.name)
  }
  showDeleteConfirmModal(e) {
    e.preventDefault()
    this.setState({ showDeleteConfirmation: true })
  }
  hideDeleteConfirmModal() {
    this.setState({
      showDeleteConfirmation: false,
    })
  }
  render() {
    const { object, showShareObjectModal, shareObjectName } = this.props
    return (
      <Dropdown id={`obj-actions-${object.name}`}>
        <Dropdown.Toggle noCaret className="fia-toggle" />
        <Dropdown.Menu>
          <a
            href=""
            className="fiad-action"
            title="Download as zip"
            onClick={this.handleDownload.bind(this)}
          >
            <i className="fas fa-cloud-download-alt" />
          </a>
          <a
            href=""
            className="fiad-action"
            title="Delete"
            onClick={this.showDeleteConfirmModal.bind(this)}
          >
            <i className="fas fa-trash-alt" />
          </a>
        </Dropdown.Menu>
        {this.state.showDeleteConfirmation && (
          <DeleteObjectConfirmModal
            deleteObject={this.deleteObject.bind(this)}
            hideDeleteConfirmModal={this.hideDeleteConfirmModal.bind(this)}
          />
        )}
      </Dropdown>
    )
  }
}

const mapStateToProps = (state, ownProps) => {
  return {
    object: ownProps.object,
  }
}

const mapDispatchToProps = (dispatch) => {
  return {
    downloadPrefix: object => dispatch(actions.downloadPrefix(object)),
    deleteObject: (object) => dispatch(actions.deleteObject(object)),
  }
}

export default connect(mapStateToProps, mapDispatchToProps)(PrefixActions)
@@ -17,22 +17,32 @@
import React from "react"
import { connect } from "react-redux"
import ObjectItem from "./ObjectItem"
import PrefixActions from "./PrefixActions"
import * as actionsObjects from "./actions"
import { getCheckedList } from "./selectors"

export const PrefixContainer = ({ object, currentPrefix, selectPrefix }) => {
export const PrefixContainer = ({
  object,
  currentPrefix,
  checkedObjectsCount,
  selectPrefix
}) => {
  const props = {
    name: object.name,
    contentType: object.contentType,
    onClick: () => selectPrefix(`${currentPrefix}${object.name}`)
  }

  if (checkedObjectsCount == 0) {
    props.actionButtons = <PrefixActions object={object} />
  }
  return <ObjectItem {...props} />
}

const mapStateToProps = (state, ownProps) => {
  return {
    object: ownProps.object,
    currentPrefix: state.objects.currentPrefix
    currentPrefix: state.objects.currentPrefix,
    checkedObjectsCount: getCheckedList(state).length
  }
}

68 browser/app/js/objects/PreviewObjectModal.js (new file)
@@ -0,0 +1,68 @@
/*
 * MinIO Cloud Storage (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import React from "react"
import { Modal, ModalHeader, ModalBody } from "react-bootstrap"

class PreviewObjectModal extends React.Component {
  constructor(props) {
    super(props)
    this.state = {
      url: "",
    }
  }

  componentDidMount() {
    this.props.getObjectURL(this.props.object.name, (url) => {
      this.setState({
        url: url,
      })
    })
  }

  render() {
    const { hidePreviewModal } = this.props
    return (
      <Modal
        show={true}
        animation={false}
        onHide={hidePreviewModal}
        bsSize="large"
      >
        <ModalHeader>Preview</ModalHeader>
        <ModalBody>
          <div className="input-group">
            {this.state.url && (
              <object data={this.state.url} style={{ display: "block", width: "100%" }}>
                <h3 style={{ textAlign: "center", display: "block", width: "100%" }}>
                  Do not have read permissions to preview "{this.props.object.name}"
                </h3>
              </object>
            )}
          </div>
        </ModalBody>
        <div className="modal-footer">
          {
            <button className="btn btn-link" onClick={hidePreviewModal}>
              Cancel
            </button>
          }
        </div>
      </Modal>
    )
  }
}
export default PreviewObjectModal
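
`PreviewObjectModal` requests a presigned URL in `componentDidMount` through the injected `getObjectURL(name, callback)` prop and renders the `<object>` element only once the URL arrives. A hedged wiring sketch (a hook-based parent is assumed here for brevity; the real caller in this changeset is the class-based `ObjectActions`):

```js
// Illustrative parent wiring for PreviewObjectModal (assumes React with hooks).
import React, { useState } from "react"
import PreviewObjectModal from "./PreviewObjectModal"

export function PreviewExample({ object, getObjectURL }) {
  const [show, setShow] = useState(false)
  return (
    <div>
      <button onClick={() => setShow(true)}>Preview</button>
      {show && (
        <PreviewObjectModal
          object={object} // e.g. { name: "photo.jpg" }
          getObjectURL={getObjectURL} // (name, cb) => cb(presignedUrl)
          hidePreviewModal={() => setShow(false)}
        />
      )}
    </div>
  )
}
```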
@@ -77,7 +77,8 @@ export class ShareObjectModal extends React.Component {
    hideShareObject()
  }
  render() {
    const { shareObjectDetails, shareObject, hideShareObject } = this.props
    const { shareObjectDetails, hideShareObject } = this.props
    const url = `${window.location.protocol}//${shareObjectDetails.url}`
    return (
      <Modal
        show={true}
@@ -93,11 +94,12 @@ export class ShareObjectModal extends React.Component {
            type="text"
            ref={node => (this.copyTextInput = node)}
            readOnly="readOnly"
            value={window.location.protocol + "//" + shareObjectDetails.url}
            value={url}
            onClick={() => this.copyTextInput.select()}
          />
        </div>
        <div
        {shareObjectDetails.showExpiryDate && (
          <div
            className="input-group"
            style={{ display: web.LoggedIn() ? "block" : "none" }}
          >
@@ -174,10 +176,11 @@ export class ShareObjectModal extends React.Component {
            </div>
          </div>
        </div>
        )}
      </ModalBody>
      <div className="modal-footer">
        <CopyToClipboard
          text={window.location.protocol + "//" + shareObjectDetails.url}
          text={url}
          onCopy={this.onUrlCopied.bind(this)}
        >
          <button className="btn btn-success">Copy Link</button>

@@ -66,6 +66,63 @@ describe("ObjectActions", () => {
expect(deleteObject).toHaveBeenCalledWith("obj1")
})

it("should call downloadObject when single object is selected and download button is clicked", () => {
const downloadObject = jest.fn()
const wrapper = shallow(
<ObjectActions
object={{ name: "obj1" }}
currentPrefix={"pre1/"}
downloadObject={downloadObject} />
)
wrapper
.find("a")
.at(1)
.simulate("click", { preventDefault: jest.fn() })
expect(downloadObject).toHaveBeenCalled()
})

it("should show PreviewObjectModal when preview action is clicked", () => {
const wrapper = shallow(
<ObjectActions
object={{ name: "obj1", contentType: "image/jpeg"}}
currentPrefix={"pre1/"} />
)
wrapper
.find("a")
.at(1)
.simulate("click", { preventDefault: jest.fn() })
expect(wrapper.state("showPreview")).toBeTruthy()
expect(wrapper.find("PreviewObjectModal").length).toBe(1)
})

it("should hide PreviewObjectModal when cancel button is clicked", () => {
const wrapper = shallow(
<ObjectActions
object={{ name: "obj1" , contentType: "image/jpeg"}}
currentPrefix={"pre1/"} />
)
wrapper
.find("a")
.at(1)
.simulate("click", { preventDefault: jest.fn() })
wrapper.find("PreviewObjectModal").prop("hidePreviewModal")()
wrapper.update()
expect(wrapper.state("showPreview")).toBeFalsy()
expect(wrapper.find("PreviewObjectModal").length).toBe(0)
})
it("should not show PreviewObjectModal when preview action is clicked if object is not an image", () => {
const wrapper = shallow(
<ObjectActions
object={{ name: "obj1"}}
currentPrefix={"pre1/"} />
)
expect(wrapper
.find("a")
.length).toBe(3) // find only the other 2
})

it("should call shareObject with object and expiry", () => {
const shareObject = jest.fn()
const wrapper = shallow(

@@ -30,7 +30,10 @@ describe("ObjectItem", () => {

it("shouldn't call onClick when the object isclicked", () => {
const onClick = jest.fn()
const wrapper = shallow(<ObjectItem name={"test"} />)
const checkObject = jest.fn()
const wrapper = shallow(
<ObjectItem name={"test"} checkObject={checkObject} />
)
wrapper.find("a").simulate("click", { preventDefault: jest.fn() })
expect(onClick).not.toHaveBeenCalled()
})
@@ -57,9 +60,15 @@ describe("ObjectItem", () => {
})

it("should call uncheckObject when the object/prefix is unchecked", () => {
const checkObject = jest.fn()
const uncheckObject = jest.fn()
const wrapper = shallow(
<ObjectItem name={"test"} checked={true} uncheckObject={uncheckObject} />
<ObjectItem
name={"test"}
checked={true}
checkObject={checkObject}
uncheckObject={uncheckObject}
/>
)
wrapper.find("input[type='checkbox']").simulate("change")
expect(uncheckObject).toHaveBeenCalledWith("test")

@@ -20,13 +20,13 @@ import { ObjectsListContainer } from "../ObjectsListContainer"

describe("ObjectsList", () => {
it("should render without crashing", () => {
shallow(<ObjectsListContainer objects={[]} />)
shallow(<ObjectsListContainer filteredObjects={[]} />)
})

it("should render ObjectsList with objects", () => {
const wrapper = shallow(
<ObjectsListContainer
objects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
filteredObjects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
/>
)
expect(wrapper.find("ObjectsList").length).toBe(1)
@@ -40,7 +40,7 @@ describe("ObjectsList", () => {
const wrapper = shallow(
<ObjectsListContainer
currentBucket="test1"
objects={[]}
filteredObjects={[]}
listLoading={true}
/>
)

browser/app/js/objects/__tests__/ObjectsSearch.test.js (new file, 32 lines)
@@ -0,0 +1,32 @@
/*
* MinIO Cloud Storage (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

import React from "react"
import { shallow } from "enzyme"
import { ObjectsSearch } from "../ObjectsSearch"

describe("ObjectsSearch", () => {
it("should render without crashing", () => {
shallow(<ObjectsSearch />)
})

it("should call onChange with search text", () => {
const onChange = jest.fn()
const wrapper = shallow(<ObjectsSearch onChange={onChange} />)
wrapper.find("input").simulate("change", { target: { value: "test" } })
expect(onChange).toHaveBeenCalledWith("test")
})
})

browser/app/js/objects/__tests__/PrefixActions.test.js (new file, 84 lines)
@@ -0,0 +1,84 @@
/*
* MinIO Cloud Storage (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

import React from "react"
import { shallow } from "enzyme"
import { PrefixActions } from "../PrefixActions"

describe("PrefixActions", () => {
it("should render without crashing", () => {
shallow(<PrefixActions object={{ name: "abc/" }} currentPrefix={"pre1/"} />)
})

it("should show DeleteObjectConfirmModal when delete action is clicked", () => {
const wrapper = shallow(
<PrefixActions object={{ name: "abc/" }} currentPrefix={"pre1/"} />
)
wrapper
.find("a")
.last()
.simulate("click", { preventDefault: jest.fn() })
expect(wrapper.state("showDeleteConfirmation")).toBeTruthy()
expect(wrapper.find("DeleteObjectConfirmModal").length).toBe(1)
})

it("should hide DeleteObjectConfirmModal when Cancel button is clicked", () => {
const wrapper = shallow(
<PrefixActions object={{ name: "abc/" }} currentPrefix={"pre1/"} />
)
wrapper
.find("a")
.last()
.simulate("click", { preventDefault: jest.fn() })
wrapper.find("DeleteObjectConfirmModal").prop("hideDeleteConfirmModal")()
wrapper.update()
expect(wrapper.state("showDeleteConfirmation")).toBeFalsy()
expect(wrapper.find("DeleteObjectConfirmModal").length).toBe(0)
})

it("should call deleteObject with object name", () => {
const deleteObject = jest.fn()
const wrapper = shallow(
<PrefixActions
object={{ name: "abc/" }}
currentPrefix={"pre1/"}
deleteObject={deleteObject}
/>
)
wrapper
.find("a")
.last()
.simulate("click", { preventDefault: jest.fn() })
wrapper.find("DeleteObjectConfirmModal").prop("deleteObject")()
expect(deleteObject).toHaveBeenCalledWith("abc/")
})

it("should call downloadPrefix when single object is selected and download button is clicked", () => {
const downloadPrefix = jest.fn()
const wrapper = shallow(
<PrefixActions
object={{ name: "abc/" }}
currentPrefix={"pre1/"}
downloadPrefix={downloadPrefix} />
)
wrapper
.find("a")
.first()
.simulate("click", { preventDefault: jest.fn() })
expect(downloadPrefix).toHaveBeenCalled()
})
})

@@ -41,4 +41,22 @@ describe("PrefixContainer", () => {
wrapper.find("Connect(ObjectItem)").prop("onClick")()
expect(selectPrefix).toHaveBeenCalledWith("xyz/abc/")
})

it("should pass actions to ObjectItem", () => {
const wrapper = shallow(
<PrefixContainer object={{ name: "abc/" }} checkedObjectsCount={0} />
)
expect(wrapper.find("Connect(ObjectItem)").prop("actionButtons")).not.toBe(
undefined
)
})

it("should pass empty actions to ObjectItem when checkedObjectCount is more than 0", () => {
const wrapper = shallow(
<PrefixContainer object={{ name: "abc/" }} checkedObjectsCount={1} />
)
expect(wrapper.find("Connect(ObjectItem)").prop("actionButtons")).toBe(
undefined
)
})
})

@@ -34,7 +34,7 @@ describe("ShareObjectModal", () => {
shallow(
<ShareObjectModal
object={{ name: "obj1" }}
shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
/>
)
})
@@ -44,7 +44,7 @@ describe("ShareObjectModal", () => {
const wrapper = shallow(
<ShareObjectModal
object={{ name: "obj1" }}
shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
hideShareObject={hideShareObject}
/>
)
@@ -59,7 +59,7 @@ describe("ShareObjectModal", () => {
const wrapper = shallow(
<ShareObjectModal
object={{ name: "obj1" }}
shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
/>
)
expect(
@@ -76,7 +76,7 @@ describe("ShareObjectModal", () => {
const wrapper = shallow(
<ShareObjectModal
object={{ name: "obj1" }}
shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
hideShareObject={hideShareObject}
showCopyAlert={showCopyAlert}
/>
@@ -89,8 +89,15 @@ describe("ShareObjectModal", () => {
describe("Update expiry values", () => {
const props = {
object: { name: "obj1" },
shareObjectDetails: { show: true, object: "obj1", url: "test" }
shareObjectDetails: { show: true, object: "obj1", url: "test", showExpiryDate: true }
}

it("should not show expiry values if shared with public link", () => {
const shareObjectDetails = { show: true, object: "obj1", url: "test", showExpiryDate: false }
const wrapper = shallow(<ShareObjectModal {...props} shareObjectDetails={shareObjectDetails} />)
expect(wrapper.find('.set-expire').exists()).toEqual(false)
})

it("should have default expiry values", () => {
const wrapper = shallow(<ShareObjectModal {...props} />)
expect(wrapper.state("expiry")).toEqual({

@@ -34,6 +34,7 @@ jest.mock("../../web", () => ({
.mockReturnValueOnce(false)
.mockReturnValueOnce(true)
.mockReturnValueOnce(true)
.mockReturnValueOnce(true)
.mockReturnValueOnce(false),
ListObjects: jest.fn(({ bucketName }) => {
if (bucketName === "test-deny") {
@@ -70,6 +71,16 @@ jest.mock("../../web", () => ({
.mockImplementationOnce(() => {
return Promise.resolve({ token: "test" })
})
.mockImplementationOnce(() => {
return Promise.resolve({ token: "test" })
}),
GetBucketPolicy: jest.fn(({ bucketName, prefix }) => {
if (!bucketName) {
return Promise.reject({ message: "Invalid bucket" })
}
if (bucketName === 'test-public') return Promise.resolve({ policy: 'readonly' })
return Promise.resolve({})
})
}))

const middlewares = [thunk]
@@ -295,7 +306,8 @@ describe("Objects actions", () => {
type: "objects/SET_SHARE_OBJECT",
show: true,
object: "b.txt",
url: "test"
url: "test",
showExpiryDate: true
}
]
store.dispatch(actionsObjects.showShareObject("b.txt", "test"))

@@ -321,14 +333,16 @@ describe("Objects actions", () => {
it("creates objects/SET_SHARE_OBJECT when object is shared", () => {
const store = mockStore({
buckets: { currentBucket: "bk1" },
objects: { currentPrefix: "pre1/" }
objects: { currentPrefix: "pre1/" },
browser: { serverInfo: {} },
})
const expectedActions = [
{
type: "objects/SET_SHARE_OBJECT",
show: true,
object: "a.txt",
url: "https://test.com/bk1/pre1/b.txt"
url: "https://test.com/bk1/pre1/b.txt",
showExpiryDate: true
},
{
type: "alert/SET",
@@ -347,10 +361,42 @@ describe("Objects actions", () => {
})
})

it("creates objects/SET_SHARE_OBJECT when object is shared with public link", () => {
const store = mockStore({
buckets: { currentBucket: "test-public" },
objects: { currentPrefix: "pre1/" },
browser: { serverInfo: { info: { domains: ['public.com'] }} },
})
const expectedActions = [
{
type: "objects/SET_SHARE_OBJECT",
show: true,
object: "a.txt",
url: "public.com/test-public/pre1/a.txt",
showExpiryDate: false
},
{
type: "alert/SET",
alert: {
type: "success",
message: "Object shared.",
id: alertActions.alertId
}
}
]
return store
.dispatch(actionsObjects.shareObject("a.txt", 1, 0, 0))
.then(() => {
const actions = store.getActions()
expect(actions).toEqual(expectedActions)
})
})

it("creates alert/SET when shareObject is failed", () => {
const store = mockStore({
buckets: { currentBucket: "" },
objects: { currentPrefix: "pre1/" }
objects: { currentPrefix: "pre1/" },
browser: { serverInfo: {} },
})
const expectedActions = [
{
@@ -442,6 +488,34 @@ describe("Objects actions", () => {
})
})

it("should download prefix", () => {
const open = jest.fn()
const send = jest.fn()
const xhrMockClass = () => ({
open: open,
send: send
})
window.XMLHttpRequest = jest.fn().mockImplementation(xhrMockClass)

const store = mockStore({
buckets: { currentBucket: "bk1" },
objects: { currentPrefix: "pre1/" }
})
return store.dispatch(actionsObjects.downloadPrefix("pre2/")).then(() => {
const requestUrl = `${
location.origin
}${minioBrowserPrefix}/zip?token=test`
expect(open).toHaveBeenCalledWith("POST", requestUrl, true)
expect(send).toHaveBeenCalledWith(
JSON.stringify({
bucketName: "bk1",
prefix: "pre1/",
objects: ["pre2/"]
})
)
})
})

it("creates objects/CHECKED_LIST_ADD action", () => {
const store = mockStore()
const expectedActions = [

@@ -23,6 +23,7 @@ describe("objects reducer", () => {
const initialState = reducer(undefined, {})
expect(initialState).toEqual({
list: [],
filter: "",
listLoading: false,
sortBy: "",
sortOrder: SORT_ORDER_ASC,

@@ -19,23 +19,24 @@ import history from "../history"
import {
sortObjectsByName,
sortObjectsBySize,
sortObjectsByDate
sortObjectsByDate,
} from "../utils"
import { getCurrentBucket } from "../buckets/selectors"
import { getCurrentPrefix, getCheckedList } from "./selectors"
import * as alertActions from "../alert/actions"
import * as bucketActions from "../buckets/actions"
import {
minioBrowserPrefix,
SORT_BY_NAME,
SORT_BY_SIZE,
SORT_BY_LAST_MODIFIED,
SORT_ORDER_ASC,
SORT_ORDER_DESC
SORT_ORDER_DESC,
} from "../constants"
import { getServerInfo, hasServerPublicDomain } from '../browser/selectors'

export const SET_LIST = "objects/SET_LIST"
export const RESET_LIST = "objects/RESET_LIST"
export const SET_FILTER = "objects/SET_FILTER"
export const APPEND_LIST = "objects/APPEND_LIST"
export const REMOVE = "objects/REMOVE"
export const SET_SORT_BY = "objects/SET_SORT_BY"
@@ -48,35 +49,42 @@ export const CHECKED_LIST_REMOVE = "objects/CHECKED_LIST_REMOVE"
export const CHECKED_LIST_RESET = "objects/CHECKED_LIST_RESET"
export const SET_LIST_LOADING = "objects/SET_LIST_LOADING"

export const setList = objects => ({
export const setList = (objects) => ({
type: SET_LIST,
objects
objects,
})

export const resetList = () => ({
type: RESET_LIST
type: RESET_LIST,
})

export const setListLoading = listLoading => ({
export const setFilter = filter => {
return {
type: SET_FILTER,
filter
}
}

export const setListLoading = (listLoading) => ({
type: SET_LIST_LOADING,
listLoading
listLoading,
})

export const fetchObjects = () => {
return function(dispatch, getState) {
return function (dispatch, getState) {
dispatch(resetList())
const {
buckets: { currentBucket },
objects: { currentPrefix }
objects: { currentPrefix },
} = getState()
if (currentBucket) {
dispatch(setListLoading(true))
return web
.ListObjects({
bucketName: currentBucket,
prefix: currentPrefix
prefix: currentPrefix,
})
.then(res => {
.then((res) => {
// we need to check if the bucket name and prefix are the same as
// when the request was made before updating the displayed objects
if (
@@ -85,10 +93,10 @@ export const fetchObjects = () => {
) {
let objects = []
if (res.objects) {
objects = res.objects.map(object => {
objects = res.objects.map((object) => {
return {
...object,
name: object.name.replace(currentPrefix, "")
name: object.name.replace(currentPrefix, ""),
}
})
}
@@ -104,13 +112,13 @@ export const fetchObjects = () => {
dispatch(setListLoading(false))
}
})
.catch(err => {
.catch((err) => {
if (web.LoggedIn()) {
dispatch(
alertActions.set({
type: "danger",
message: err.message,
autoClear: true
autoClear: true,
})
)
dispatch(resetList())
@@ -123,8 +131,8 @@ export const fetchObjects = () => {
}
}

export const sortObjects = sortBy => {
return function(dispatch, getState) {
export const sortObjects = (sortBy) => {
return function (dispatch, getState) {
const { objects } = getState()
let sortOrder = SORT_ORDER_ASC
// Reverse sort order if the list is already sorted on same field
@@ -149,18 +157,18 @@ const sortObjectsList = (list, sortBy, sortOrder) => {
}
}

export const setSortBy = sortBy => ({
export const setSortBy = (sortBy) => ({
type: SET_SORT_BY,
sortBy
sortBy,
})

export const setSortOrder = sortOrder => ({
export const setSortOrder = (sortOrder) => ({
type: SET_SORT_ORDER,
sortOrder
sortOrder,
})

export const selectPrefix = prefix => {
return function(dispatch, getState) {
export const selectPrefix = (prefix) => {
return function (dispatch, getState) {
dispatch(setCurrentPrefix(prefix))
dispatch(fetchObjects())
dispatch(resetCheckedList())
@@ -169,49 +177,49 @@ export const selectPrefix = prefix => {
}
}

export const setCurrentPrefix = prefix => {
export const setCurrentPrefix = (prefix) => {
return {
type: SET_CURRENT_PREFIX,
prefix
prefix,
}
}

export const setPrefixWritable = prefixWritable => ({
export const setPrefixWritable = (prefixWritable) => ({
type: SET_PREFIX_WRITABLE,
prefixWritable
prefixWritable,
})

export const deleteObject = object => {
return function(dispatch, getState) {
export const deleteObject = (object) => {
return function (dispatch, getState) {
const currentBucket = getCurrentBucket(getState())
const currentPrefix = getCurrentPrefix(getState())
const objectName = `${currentPrefix}${object}`
return web
.RemoveObject({
bucketName: currentBucket,
objects: [objectName]
objects: [objectName],
})
.then(() => {
dispatch(removeObject(object))
})
.catch(e => {
.catch((e) => {
dispatch(
alertActions.set({
type: "danger",
message: e.message
message: e.message,
})
)
})
}
}

export const removeObject = object => ({
export const removeObject = (object) => ({
type: REMOVE,
object
object,
})

export const deleteCheckedObjects = () => {
return function(dispatch, getState) {
return function (dispatch, getState) {
const checkedObjects = getCheckedList(getState())
for (let i = 0; i < checkedObjects.length; i++) {
dispatch(deleteObject(checkedObjects[i]))
@@ -221,33 +229,52 @@ export const deleteCheckedObjects = () => {
}

export const shareObject = (object, days, hours, minutes) => {
return function(dispatch, getState) {
return function (dispatch, getState) {
const hasServerDomain = hasServerPublicDomain(getState())
const currentBucket = getCurrentBucket(getState())
const currentPrefix = getCurrentPrefix(getState())
const objectName = `${currentPrefix}${object}`
const expiry = days * 24 * 60 * 60 + hours * 60 * 60 + minutes * 60
if (web.LoggedIn()) {
return web
.PresignedGet({
host: location.host,
bucket: currentBucket,
object: objectName,
expiry: expiry
.GetBucketPolicy({ bucketName: currentBucket, prefix: currentPrefix })
.catch(() => ({ policy: null }))
.then(({ policy }) => {
if (hasServerDomain && ['readonly', 'readwrite'].includes(policy)) {
const domain = getServerInfo(getState()).info.domains[0]
const url = `${domain}/${currentBucket}/${encodeURI(objectName)}`
dispatch(showShareObject(object, url, false))
dispatch(
alertActions.set({
type: "success",
message: "Object shared."
})
)
} else {
return web
.PresignedGet({
host: location.host,
bucket: currentBucket,
object: objectName,
expiry: expiry
})
}
})
.then(obj => {
.then((obj) => {
if (!obj) return
dispatch(showShareObject(object, obj.url))
dispatch(
alertActions.set({
type: "success",
message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`
message: `Object shared. Expires in ${days} days ${hours} hours ${minutes} minutes`,
})
)
})
.catch(err => {
.catch((err) => {
dispatch(
alertActions.set({
type: "danger",
message: err.message
message: err.message,
})
)
})
@@ -265,29 +292,29 @@ export const shareObject = (object, days, hours, minutes) => {
dispatch(
alertActions.set({
type: "success",
message: `Object shared.`
message: `Object shared.`,
})
)
}
}
}

export const showShareObject = (object, url) => ({
export const showShareObject = (object, url, showExpiryDate = true) => ({
type: SET_SHARE_OBJECT,
show: true,
object,
url
url,
showExpiryDate,
})

export const hideShareObject = (object, url) => ({
type: SET_SHARE_OBJECT,
show: false,
object: "",
url: ""
url: "",
})

export const downloadObject = object => {
return function(dispatch, getState) {
export const getObjectURL = (object, callback) => {
return function (dispatch, getState) {
const currentBucket = getCurrentBucket(getState())
const currentPrefix = getCurrentPrefix(getState())
const objectName = `${currentPrefix}${object}`
@@ -295,78 +322,119 @@ export const downloadObject = object => {
if (web.LoggedIn()) {
return web
.CreateURLToken()
.then(res => {
const url = `${
window.location.origin
}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${
res.token
}`
window.location = url
.then((res) => {
const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${res.token}`
callback(url)
})
.catch(err => {
.catch((err) => {
dispatch(
alertActions.set({
type: "danger",
message: err.message
message: err.message,
})
)
})
} else {
const url = `${
window.location.origin
}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
callback(url)
}
}
}
export const downloadObject = (object) => {
return function (dispatch, getState) {
const currentBucket = getCurrentBucket(getState())
const currentPrefix = getCurrentPrefix(getState())
const objectName = `${currentPrefix}${object}`
const encObjectName = encodeURI(objectName)
if (web.LoggedIn()) {
return web
.CreateURLToken()
.then((res) => {
const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=${res.token}`
window.location = url
})
.catch((err) => {
dispatch(
alertActions.set({
type: "danger",
message: err.message,
})
)
})
} else {
const url = `${window.location.origin}${minioBrowserPrefix}/download/${currentBucket}/${encObjectName}?token=`
window.location = url
}
}
}

export const checkObject = object => ({
type: CHECKED_LIST_ADD,
object
})

export const uncheckObject = object => ({
type: CHECKED_LIST_REMOVE,
object
})

export const resetCheckedList = () => ({
type: CHECKED_LIST_RESET
})

export const downloadCheckedObjects = () => {
return function(dispatch, getState) {
const state = getState()
const req = {
bucketName: getCurrentBucket(state),
prefix: getCurrentPrefix(state),
objects: getCheckedList(state)
}
if (!web.LoggedIn()) {
const requestUrl = location.origin + "/minio/zip?token="
downloadZip(requestUrl, req, dispatch)
} else {
return web
.CreateURLToken()
.then(res => {
const requestUrl = `${
location.origin
}${minioBrowserPrefix}/zip?token=${res.token}`
downloadZip(requestUrl, req, dispatch)
})
.catch(err =>
dispatch(
alertActions.set({
type: "danger",
message: err.message
})
)
)
}
export const downloadPrefix = (object) => {
return function (dispatch, getState) {
return downloadObjects(
getCurrentBucket(getState()),
getCurrentPrefix(getState()),
[object],
`${object.slice(0, -1)}.zip`,
dispatch
)
}
}

const downloadZip = (url, req, dispatch) => {

export const checkObject = (object) => ({
type: CHECKED_LIST_ADD,
object,
})

export const uncheckObject = (object) => ({
type: CHECKED_LIST_REMOVE,
object,
})

export const resetCheckedList = () => ({
type: CHECKED_LIST_RESET,
})

export const downloadCheckedObjects = () => {
return function (dispatch, getState) {
return downloadObjects(
getCurrentBucket(getState()),
getCurrentPrefix(getState()),
getCheckedList(getState()),
null,
dispatch
)
}
}

const downloadObjects = (bucketName, prefix, objects, filename, dispatch) => {
const req = {
bucketName: bucketName,
prefix: prefix,
objects: objects,
}
if (web.LoggedIn()) {
return web
.CreateURLToken()
.then((res) => {
const requestUrl = `${location.origin}${minioBrowserPrefix}/zip?token=${res.token}`
downloadZip(requestUrl, req, filename, dispatch)
})
.catch((err) =>
dispatch(
alertActions.set({
type: "danger",
message: err.message,
})
)
)
} else {
const requestUrl = `${location.origin}${minioBrowserPrefix}/zip?token=`
downloadZip(requestUrl, req, filename, dispatch)
}
}

const downloadZip = (url, req, filename, dispatch) => {
var anchor = document.createElement("a")
document.body.appendChild(anchor)

@@ -374,17 +442,17 @@ const downloadZip = (url, req, dispatch) => {
xhr.open("POST", url, true)
xhr.responseType = "blob"

xhr.onload = function(e) {
xhr.onload = function (e) {
if (this.status == 200) {
dispatch(resetCheckedList())
var blob = new Blob([this.response], {
type: "octet/stream"
type: "octet/stream",
})
var blobUrl = window.URL.createObjectURL(blob)
var separator = req.prefix.length > 1 ? "-" : ""

anchor.href = blobUrl
anchor.download =
anchor.download = filename ||
req.bucketName + separator + req.prefix.slice(0, -1) + ".zip"

anchor.click()

@@ -28,6 +28,7 @@ const removeObject = (list, objectToRemove, lookup) => {
export default (
state = {
list: [],
filter: "",
listLoading: false,
sortBy: "",
sortOrder: SORT_ORDER_ASC,
@@ -53,6 +54,11 @@ export default (
...state,
list: []
}
case actionsObjects.SET_FILTER:
return {
...state,
filter: action.filter
}
case actionsObjects.SET_LIST_LOADING:
return {
...state,
@@ -89,7 +95,8 @@ export default (
shareObject: {
show: action.show,
object: action.object,
url: action.url
url: action.url,
showExpiryDate: action.showExpiryDate
}
}
case actionsObjects.CHECKED_LIST_ADD:

@@ -21,3 +21,13 @@ export const getCurrentPrefix = state => state.objects.currentPrefix
export const getCheckedList = state => state.objects.checkedList

export const getPrefixWritable = state => state.objects.prefixWritable

const objectsSelector = state => state.objects.list
const objectsFilterSelector = state => state.objects.filter

export const getFilteredObjects = createSelector(
objectsSelector,
objectsFilterSelector,
(objects, filter) => objects.filter(
object => object.name.toLowerCase().startsWith(filter.toLowerCase()))
)
@@ -36,7 +36,7 @@ export class Dropzone extends React.Component {
// Overwrite the default styling from react-dropzone; otherwise it
// won't handle child elements correctly.
const style = {
height: "100%",
flex: "1",
borderWidth: "0",
borderStyle: "dashed",
borderColor: "#fff"
@@ -48,18 +48,29 @@ export class Dropzone extends React.Component {
const rejectStyle = {
backgroundColor: "#ffdddd"
}
const getStyle = (isDragActive, isDragAccept, isDragReject) => ({
...style,
...(isDragActive ? activeStyle : {}),
...(isDragReject ? rejectStyle : {})
})

// disableClick means that it won't trigger a file upload box when
// the user clicks on a file.
return (
<ReactDropzone
style={style}
activeStyle={activeStyle}
rejectStyle={rejectStyle}
disableClick={true}
onDrop={this.onDrop.bind(this)}
>
{this.props.children}
{({getRootProps, getInputProps, isDragActive, isDragAccept, isDragReject}) => (
<div
{...getRootProps({
onClick: event => event.stopPropagation()
})}
style={getStyle(isDragActive, isDragAccept, isDragReject)}
>
<input {...getInputProps()} />
{this.props.children}
</div>
)}
</ReactDropzone>
)
}

@@ -89,11 +89,16 @@ export const uploadFile = file => {
return
}
const currentPrefix = getCurrentPrefix(state)
const objectName = `${currentPrefix}${file.name}`
var _filePath = file.path || file.name
if (_filePath.charAt(0) == '/') {
_filePath = _filePath.substring(1)
}
const filePath = _filePath
const objectName = `${currentPrefix}${filePath}`
const uploadUrl = `${
window.location.origin
}${minioBrowserPrefix}/upload/${currentBucket}/${objectName}`
const slug = `${currentBucket}-${currentPrefix}-${file.name}`
const slug = `${currentBucket}-${currentPrefix}-${filePath}`

let xhr = new XMLHttpRequest()
xhr.open("PUT", uploadUrl, true)
@@ -141,7 +146,7 @@ export const uploadFile = file => {
dispatch(
alertActions.set({
type: "success",
message: "File '" + file.name + "' uploaded successfully."
message: "File '" + filePath + "' uploaded successfully."
})
)
dispatch(objectsActions.selectPrefix(currentPrefix))
@@ -153,7 +158,7 @@ export const uploadFile = file => {
dispatch(
alertActions.set({
type: "danger",
message: "Error occurred uploading '" + file.name + "'."
message: "Error occurred uploading '" + filePath + "'."
})
)
})

@@ -21,7 +21,7 @@ import storage from 'local-storage-fallback'

class Web {
constructor(endpoint) {
const namespace = 'Web'
const namespace = 'web'
this.JSONrpc = new JSONrpc({
endpoint,
namespace

@@ -20,7 +20,8 @@
@media(max-width: @screen-sm-max) {
padding: 75px 0 80px;
}

display: flex;
flex-direction: column;
min-height:100vh;
overflow: auto;
}

@@ -169,6 +169,24 @@ select.form-control {
}
}

.ig-search-dark {
&:before {
font-family: @font-family-icon;
font-weight: 900;
content: '\f002';
font-size: 15px;
position: absolute;
left: 2px;
top: 8px;
color: rgba(0, 0, 0, 0.5);
}

.ig-text {
padding-left: 25px;
.placeholder(rgba(0, 0, 0, 0.5))
}
}

.ig-search {
&:before {
font-family: @font-family-icon;
@@ -270,4 +288,4 @@ select.form-control {
.set-expire-decrease {
bottom: -27px;
.rotate(-180deg);
}
}

@@ -105,7 +105,7 @@ div.fesl-row {

.fesl-item-name {
a {
cursor: default;
cursor: pointer;
}
}

@@ -114,12 +114,6 @@ div.fesl-row {
----------------------------*/
&[data-type=folder] {
.list-type(#a1d6dd, '\f07b');

.fesl-item-name {
a {
cursor: pointer;
}
}
}
&[data-type=pdf] {.list-type(#fa7775, '\f1c1'); }
&[data-type=zip] { .list-type(#427089, '\f1c6'); }
@@ -355,6 +349,7 @@ div.fesl-row {
margin: 0;
height: 100%;
text-align: right;
white-space: nowrap;
}

.dropdown {
@@ -501,4 +496,4 @@ div.fesl-row {
.opacity(1);
right: 0;
}
}
}

browser/package-lock.json (generated, 10916 lines)
File diff suppressed because it is too large
@@ -28,71 +28,70 @@
},
"homepage": "https://github.com/minio/minio",
"devDependencies": {
"async": "^1.5.2",
"async": "^3.2.0",
"babel-cli": "^6.26.0",
"babel-core": "^6.26.3",
"babel-jest": "^22.1.0",
"babel-jest": "^23.6.0",
"babel-loader": "^7.1.2",
"babel-plugin-syntax-object-rest-spread": "^6.13.0",
"babel-plugin-transform-object-rest-spread": "^6.8.0",
"babel-polyfill": "^6.23.0",
"babel-plugin-transform-object-rest-spread": "^6.26.0",
"babel-polyfill": "^6.26.0",
"babel-preset-es2015": "^6.14.0",
"babel-preset-react": "^6.11.1",
"babel-register": "^6.26.0",
"copy-webpack-plugin": "^4.6.0",
"css-loader": "^0.23.1",
"enzyme": "^3.10.0",
"enzyme-adapter-react-16": "^1.1.1",
"esformatter": "^0.10.0",
"esformatter-jsx": "^7.4.1",
"copy-webpack-plugin": "^6.0.1",
"css-loader": "^3.5.3",
"enzyme": "^3.11.0",
"enzyme-adapter-react-16": "^1.15.2",
"esformatter": "^0.11.3",
"esformatter-jsx": "^8.0.1",
"esformatter-jsx-ignore": "^1.0.6",
"html-webpack-plugin": "^3.2.0",
"jest": "^22.1.4",
"jest-enzyme": "^4.0.2",
"json-loader": "^0.5.4",
"less": "^3.9.0",
"less-loader": "^4.1.0",
"purgecss-webpack-plugin": "^1.4.0",
"style-loader": "^0.13.1",
"url-loader": "^0.5.7",
"webpack-cli": "^3.2.0",
"webpack-dev-server": "^3.1.14"
"html-webpack-plugin": "^4.3.0",
"jest": "^23.6.0",
"jest-enzyme": "^7.1.2",
"json-loader": "^0.5.7",
"less": "^3.11.1",
"less-loader": "^6.1.0",
"purgecss-webpack-plugin": "^2.2.0",
"style-loader": "^1.2.1",
"url-loader": "^4.1.0",
"webpack-cli": "^3.3.11",
"webpack-dev-server": "^3.11.0"
},
"dependencies": {
"@fortawesome/fontawesome-free": "^5.10.0",
"@fortawesome/fontawesome-free": "^5.13.0",
"bootstrap": "^3.4.1",
"classnames": "^2.2.3",
"core-js": "^3.2.1",
"expect": "^1.20.2",
"glob-all": "^3.1.0",
"history": "^4.7.2",
"classnames": "^2.2.6",
"core-js": "^3.6.5",
"expect": "^26.0.1",
"glob-all": "^3.2.1",
"history": "^4.10.1",
"humanize": "0.0.9",
"identity-obj-proxy": "^3.0.0",
"json-loader": "^0.5.4",
"jwt-decode": "^2.2.0",
"local-storage-fallback": "^4.0.2",
"local-storage-fallback": "^4.1.1",
"material-design-iconic-font": "^2.2.0",
"mime-db": "^1.25.0",
"mime-types": "^2.1.13",
"moment": "^2.24.0",
"query-string": "^6.8.2",
"react": "^16.2.0",
"react-addons-test-utils": "^0.14.8",
"react-bootstrap": "^0.32.0",
"react-copy-to-clipboard": "^5.0.1",
"mime-db": "^1.44.0",
"mime-types": "^2.1.27",
"moment": "^2.26.0",
"query-string": "^6.12.1",
"react": "^16.13.1",
"react-addons-test-utils": "^15.6.2",
"react-bootstrap": "^0.32.4",
"react-copy-to-clipboard": "^5.0.2",
"react-custom-scrollbars": "^4.2.1",
"react-dom": "^16.2.0",
"react-dropzone": "^4.2.3",
"react-infinite-scroller": "^1.0.6",
"react-dom": "^16.13.1",
"react-dropzone": "^11.0.1",
"react-infinite-scroller": "^1.2.4",
"react-onclickout": "^2.0.8",
"react-redux": "^5.0.6",
"react-router-dom": "^4.2.0",
"redux": "^3.7.2",
"redux-mock-store": "^1.5.1",
"redux-thunk": "^2.2.0",
"reselect": "^3.0.1",
"superagent": "^3.8.2",
"react-redux": "^5.1.2",
"react-router-dom": "^5.2.0",
"redux": "^4.0.5",
"redux-mock-store": "^1.5.4",
"redux-thunk": "^2.3.0",
"reselect": "^4.0.0",
"superagent": "^5.2.2",
"superagent-es6-promise": "^1.0.0",
"webpack": "^4.28.3"
"webpack": "^4.43.0"
}
}

browser/staticcheck.conf (new file, 1 line)
@@ -0,0 +1 @@
checks = ["all", "-ST1005", "-ST1000", "-SA4000", "-SA9004", "-SA1019", "-SA1008", "-U1000", "-ST1003", "-ST1018"]

File diff suppressed because one or more lines are too long
@@ -73,17 +73,17 @@ var exports = {
},
proxy: {
'/minio/webrpc': {
target: 'http://localhost:9000',
secure: false,
headers: {'Host': "localhost:9000"}
target: 'http://localhost:9000',
secure: false,
headers: {'Host': "localhost:9000"}
},
'/minio/upload/*': {
target: 'http://localhost:9000',
secure: false
target: 'http://localhost:9000',
secure: false
},
'/minio/download/*': {
target: 'http://localhost:9000',
secure: false
target: 'http://localhost:9000',
secure: false
},
'/minio/zip': {
target: 'http://localhost:9000',
@@ -92,7 +92,7 @@ var exports = {
}
},
plugins: [
new CopyWebpackPlugin([
new CopyWebpackPlugin({patterns: [
{from: 'app/css/loader.css'},
{from: 'app/img/browsers/chrome.png'},
{from: 'app/img/browsers/firefox.png'},
@@ -102,7 +102,7 @@ var exports = {
{from: 'app/img/favicon/favicon-32x32.png'},
{from: 'app/img/favicon/favicon-96x96.png'},
{from: 'app/index.html'}
]),
]}),
new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
new PurgecssPlugin({
paths: glob.sync([

@@ -67,7 +67,7 @@ var exports = {
fs:'empty'
},
plugins: [
new CopyWebpackPlugin([
new CopyWebpackPlugin({patterns: [
{from: 'app/css/loader.css'},
{from: 'app/img/browsers/chrome.png'},
{from: 'app/img/browsers/firefox.png'},
@@ -77,7 +77,7 @@ var exports = {
{from: 'app/img/favicon/favicon-32x32.png'},
{from: 'app/img/favicon/favicon-96x96.png'},
{from: 'app/index.html'}
]),
]}),
new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
new PurgecssPlugin({
paths: glob.sync([

@@ -9,7 +9,7 @@ function _init() {
export CGO_ENABLED=0

## List of architectures and OS to test coss compilation.
SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386"
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64"
}

function _build() {
@@ -20,11 +20,11 @@ function _build() {
package=$(go list -f '{{.ImportPath}}')
printf -- "--> %15s:%s\n" "${osarch}" "${package}"

# Go build to build the binary.
# go build -trimpath to build the binary.
export GOOS=$os
export GOARCH=$arch
export GO111MODULE=on
go build -tags kqueue -o /dev/null
go build -trimpath -tags kqueue -o /dev/null
}

function main() {

@@ -46,7 +46,10 @@ function main()
gw_pid="$(start_minio_gateway_s3)"

SERVER_ENDPOINT=127.0.0.1:24240 ENABLE_HTTPS=0 ACCESS_KEY=minio \
SECRET_KEY=minio123 MINT_MODE="full" /mint/entrypoint.sh
SECRET_KEY=minio123 MINT_MODE="full" /mint/entrypoint.sh \
aws-sdk-go aws-sdk-java aws-sdk-php aws-sdk-ruby awscli \
healthcheck mc minio-dotnet minio-js \
minio-py s3cmd s3select security
rv=$?

kill "$sr_pid"

@@ -44,10 +44,21 @@ func releaseTag(version string) string {
relPrefix = prefix
}

relSuffix := ""
if hotfix := os.Getenv("MINIO_HOTFIX"); hotfix != "" {
relSuffix = hotfix
}

relTag := strings.Replace(version, " ", "-", -1)
relTag = strings.Replace(relTag, ":", "-", -1)
relTag = strings.Replace(relTag, ",", "", -1)
return relPrefix + "." + relTag
relTag = relPrefix + "." + relTag

if relSuffix != "" {
relTag += "." + relSuffix
}

return relTag
}

// commitID returns the abbreviated commit-id hash of the last commit.
@@ -68,5 +79,12 @@ func commitID() string {
}

func main() {
fmt.Println(genLDFlags(time.Now().UTC().Format(time.RFC3339)))
var version string
if len(os.Args) > 1 {
version = os.Args[1]
} else {
version = time.Now().UTC().Format(time.RFC3339)
}

fmt.Println(genLDFlags(version))
}

@@ -3,5 +3,5 @@
set -e

for d in $(go list ./... | grep -v browser); do
CGO_ENABLED=1 go test -v -race --timeout 20m "$d"
CGO_ENABLED=1 go test -v -race --timeout 50m "$d"
done

@@ -45,88 +45,64 @@ FUNCTIONAL_TESTS="$WORK_DIR/functional-tests.sh"
function start_minio_fs()
{
"${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
minio_pid=$!
sleep 10

echo "$minio_pid"
}

function start_minio_erasure()
{
"${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
minio_pid=$!
sleep 15

echo "$minio_pid"
}

function start_minio_erasure_sets()
{
"${MINIO[@]}" server "${WORK_DIR}/erasure-disk-sets{1...32}" >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
minio_pid=$!
export MINIO_ENDPOINTS="${WORK_DIR}/erasure-disk-sets{1...32}"
"${MINIO[@]}" server > "$WORK_DIR/erasure-minio-sets.log" 2>&1 &
sleep 15

echo "$minio_pid"
}

function start_minio_zone_erasure_sets()
function start_minio_pool_erasure_sets()
{
declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY

"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
minio_pids[0]=$!

"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
minio_pids[1]=$!
export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/pool-disk-sets{1...4} http://127.0.0.1:9001${WORK_DIR}/pool-disk-sets{5...8}"
"${MINIO[@]}" server --address ":9000" > "$WORK_DIR/pool-minio-9000.log" 2>&1 &
"${MINIO[@]}" server --address ":9001" > "$WORK_DIR/pool-minio-9001.log" 2>&1 &

sleep 40
echo "${minio_pids[@]}"
}

function start_minio_zone_erasure_sets_ipv6()
function start_minio_pool_erasure_sets_ipv6()
{
declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY

"${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
minio_pids[0]=$!

"${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
minio_pids[1]=$!
export MINIO_ENDPOINTS="http://[::1]:9000${WORK_DIR}/pool-disk-sets{1...4} http://[::1]:9001${WORK_DIR}/pool-disk-sets{5...8}"
"${MINIO[@]}" server --address="[::1]:9000" > "$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
"${MINIO[@]}" server --address="[::1]:9001" > "$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &

sleep 40
echo "${minio_pids[@]}"
}

function start_minio_dist_erasure()
{
declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
minio_pids[0]=$!
"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
minio_pids[1]=$!
"${MINIO[@]}" server --address=:9002 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9002.log" 2>&1 &
minio_pids[2]=$!
"${MINIO[@]}" server --address=:9003 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9003.log" 2>&1 &
minio_pids[3]=$!
export MINIO_ENDPOINTS="http://127.0.0.1:9000${WORK_DIR}/dist-disk1 http://127.0.0.1:9001${WORK_DIR}/dist-disk2 http://127.0.0.1:9002${WORK_DIR}/dist-disk3 http://127.0.0.1:9003${WORK_DIR}/dist-disk4"
for i in $(seq 0 3); do
"${MINIO[@]}" server --address ":900${i}" > "$WORK_DIR/dist-minio-900${i}.log" 2>&1 &
done

sleep 40
echo "${minio_pids[@]}"
}

function run_test_fs()
{
minio_pid="$(start_minio_fs)"
start_minio_fs

(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?

kill "$minio_pid"
pkill minio
sleep 3

if [ "$rv" -ne 0 ]; then
@@ -137,13 +113,14 @@ function run_test_fs()
return "$rv"
}

function run_test_erasure_sets() {
minio_pid="$(start_minio_erasure_sets)"
function run_test_erasure_sets()
{
start_minio_erasure_sets

(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?

kill "$minio_pid"
pkill minio
sleep 3

if [ "$rv" -ne 0 ]; then
@@ -154,83 +131,51 @@ function run_test_erasure_sets() {
return "$rv"
}

function run_test_dist_erasure_sets_ipv6()
function run_test_pool_erasure_sets()
{
minio_pids=( $(start_minio_dist_erasure_sets_ipv6) )
start_minio_pool_erasure_sets

(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?

pkill minio
sleep 3

if [ "$rv" -ne 0 ]; then
for i in $(seq 0 1); do
echo "server$i log:"
cat "$WORK_DIR/pool-minio-900$i.log"
done
fi

for i in $(seq 0 1); do
rm -f "$WORK_DIR/pool-minio-900$i.log"
done

return "$rv"
}

function run_test_pool_erasure_sets_ipv6()
{
start_minio_pool_erasure_sets_ipv6

export SERVER_ENDPOINT="[::1]:9000"

(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?

for pid in "${minio_pids[@]}"; do
kill "$pid"
done
sleep 3

if [ "$rv" -ne 0 ]; then
for i in $(seq 0 9); do
echo "server$i log:"
cat "$WORK_DIR/dist-minio-v6-900$i.log"
done
fi

for i in $(seq 0 9); do
rm -f "$WORK_DIR/dist-minio-v6-900$i.log"
done

return "$rv"
}

function run_test_zone_erasure_sets()
{
minio_pids=( $(start_minio_zone_erasure_sets) )

(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?

for pid in "${minio_pids[@]}"; do
kill "$pid"
done
pkill minio
sleep 3

if [ "$rv" -ne 0 ]; then
for i in $(seq 0 1); do
echo "server$i log:"
cat "$WORK_DIR/zone-minio-900$i.log"
cat "$WORK_DIR/pool-minio-ipv6-900$i.log"
done
fi

for i in $(seq 0 1); do
rm -f "$WORK_DIR/zone-minio-900$i.log"
done

return "$rv"
}

function run_test_zone_erasure_sets_ipv6()
{
minio_pids=( $(start_minio_zone_erasure_sets_ipv6) )

export SERVER_ENDPOINT="[::1]:9000"

(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?

for pid in "${minio_pids[@]}"; do
kill "$pid"
done
sleep 3

if [ "$rv" -ne 0 ]; then
for i in $(seq 0 1); do
echo "server$i log:"
cat "$WORK_DIR/zone-minio-ipv6-900$i.log"
done
fi

for i in $(seq 0 1); do
rm -f "$WORK_DIR/zone-minio-ipv6-900$i.log"
rm -f "$WORK_DIR/pool-minio-ipv6-900$i.log"
done

return "$rv"
@@ -238,12 +183,12 @@ function run_test_zone_erasure_sets_ipv6()

function run_test_erasure()
{
minio_pid="$(start_minio_erasure)"
start_minio_erasure

(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?

kill "$minio_pid"
pkill minio
sleep 3

if [ "$rv" -ne 0 ]; then
@@ -256,14 +201,12 @@ function run_test_erasure()

function run_test_dist_erasure()
{
minio_pids=( $(start_minio_dist_erasure) )
start_minio_dist_erasure

(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?

for pid in "${minio_pids[@]}"; do
kill "$pid"
done
pkill minio
sleep 3

if [ "$rv" -ne 0 ]; then
@@ -279,7 +222,7 @@ function run_test_dist_erasure()

rm -f "$WORK_DIR/dist-minio-9000.log" "$WORK_DIR/dist-minio-9001.log" "$WORK_DIR/dist-minio-9002.log" "$WORK_DIR/dist-minio-9003.log"

return "$rv"
return "$rv"
}

function purge()
@@ -352,14 +295,14 @@ function main()
fi

echo "Testing in Distributed Eraure expanded setup"
if ! run_test_zone_erasure_sets; then
if ! run_test_pool_erasure_sets; then
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi

echo "Testing in Distributed Erasure expanded setup with ipv6"
if ! run_test_zone_erasure_sets_ipv6; then
if ! run_test_pool_erasure_sets_ipv6; then
echo "FAILED"
purge "$WORK_DIR"
exit 1

@@ -29,34 +29,59 @@ MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )

function start_minio_3_node() {
declare -a minio_pids
export MINIO_ACCESS_KEY=minio
export MINIO_SECRET_KEY=minio123
export MINIO_ERASURE_SET_DRIVE_COUNT=6

start_port=$(shuf -i 10000-65000 -n 1)
args=""
for i in $(seq 1 3); do
ARGS+=("http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[8000+$i]${WORK_DIR}/$i/6/")
args="$args http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/"
done

"${MINIO[@]}" --address ":8001" ${ARGS[@]} > "${WORK_DIR}/dist-minio-8001.log" 2>&1 &
minio_pids[0]=$!
"${MINIO[@]}" --address ":$[$start_port+1]" $args > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
disown $!

"${MINIO[@]}" --address ":8002" ${ARGS[@]} > "${WORK_DIR}/dist-minio-8002.log" 2>&1 &
minio_pids[1]=$!
"${MINIO[@]}" --address ":$[$start_port+2]" $args > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
disown $!

"${MINIO[@]}" --address ":8003" ${ARGS[@]} > "${WORK_DIR}/dist-minio-8003.log" 2>&1 &
minio_pids[2]=$!
"${MINIO[@]}" --address ":$[$start_port+3]" $args > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
disown $!

sleep "$1"
echo "${minio_pids[@]}"
if [ "$(pgrep -c minio)" -ne 3 ]; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
if ! pkill minio; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi

sleep 1;
if pgrep minio; then
# forcibly killing, to proceed further properly.
if ! pkill -9 minio; then
echo "no minio process running anymore, proceed."
fi
fi
}


function check_online() {
for i in $(seq 1 3); do
if grep -q 'Server switching to safe mode' ${WORK_DIR}/dist-minio-$[8000+$i].log; then
echo "1"
fi
done
if grep -q 'Server switching to safe mode' ${WORK_DIR}/dist-minio-*.log; then
echo "1"
fi
}

function purge()
@@ -75,50 +100,21 @@ function __init__()
}

function perform_test() {
minio_pids=( $(start_minio_3_node 60) )
for pid in "${minio_pids[@]}"; do
if ! kill "$pid"; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-$[8000+$i].log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
# forcibly killing, to proceed further properly.
kill -9 "$pid"
done
start_minio_3_node 60

echo "Testing Distributed Erasure setup healing of drives"
echo "Remove the contents of the disks belonging to '${1}' erasure set"

rm -rf ${WORK_DIR}/${1}/*/

minio_pids=( $(start_minio_3_node 60) )
for pid in "${minio_pids[@]}"; do
if ! kill "$pid"; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-$[8000+$i].log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
# forcibly killing, to proceed further properly.
# if the previous kill is taking time.
kill -9 "$pid"
done
start_minio_3_node 60

rv=$(check_online)
if [ "$rv" == "1" ]; then
for pid in "${minio_pids[@]}"; do
kill -9 "$pid"
done
pkill -9 minio
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-$[8000+$i].log"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"

@@ -100,18 +100,18 @@ func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.
}

if len(acl.AccessControlList.Grants) == 0 {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}

if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}
}

if aclHeader != "" && aclHeader != "private" {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}

@@ -159,6 +159,7 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
},
Permission: "FULL_CONTROL",
})

if err := xml.NewEncoder(w).Encode(acl); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
@@ -214,18 +215,18 @@ func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.
}

if len(acl.AccessControlList.Grants) == 0 {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}

if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}
}

if aclHeader != "" && aclHeader != "private" {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}

299
cmd/admin-bucket-handlers.go
Normal file
@@ -0,0 +1,299 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
"encoding/json"
"io"
"io/ioutil"
"net/http"

"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/env"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/madmin"
)

const (
bucketQuotaConfigFile = "quota.json"
bucketTargetsFile = "bucket-targets.json"
)

// PutBucketQuotaConfigHandler - PUT Bucket quota configuration.
// ----------
// Places a quota configuration on the specified bucket. The quota
// specified in the quota configuration will be applied by default
// to enforce total quota for the specified bucket.
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketQuotaConfig")

defer logger.AuditLog(w, r, "PutBucketQuotaConfig", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

vars := mux.Vars(r)
bucket := vars["bucket"]

// Turn off quota commands if data usage info is unavailable.
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminBucketQuotaDisabled), r.URL)
return
}

if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}

data, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}

if _, err = parseBucketQuota(bucket, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

if err = globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Write success response.
writeSuccessResponseHeadersOnly(w)
}
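As a usage note: the body that parseBucketQuota validates above is a small JSON document. Below is a minimal sketch of producing such a payload, assuming madmin.BucketQuota serializes to "quota"/"quotatype" fields; the field names are illustrative assumptions, not confirmed by this diff:

package main

import (
"encoding/json"
"fmt"
)

// bucketQuota mirrors the assumed wire shape of madmin.BucketQuota;
// the JSON field names here are assumptions for illustration.
type bucketQuota struct {
Quota uint64 `json:"quota"`
Type  string `json:"quotatype,omitempty"`
}

func main() {
// A hard quota of 1 GiB; this is the kind of body that
// PutBucketQuotaConfigHandler reads, validates and stores.
q := bucketQuota{Quota: 1 << 30, Type: "hard"}
data, err := json.Marshal(q)
if err != nil {
panic(err)
}
fmt.Println(string(data)) // {"quota":1073741824,"quotatype":"hard"}
}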

// GetBucketQuotaConfigHandler - gets bucket quota configuration
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketQuotaConfig")

defer logger.AuditLog(w, r, "GetBucketQuotaConfig", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

vars := mux.Vars(r)
bucket := vars["bucket"]
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}

config, err := globalBucketMetadataSys.GetQuotaConfig(bucket)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

configData, err := json.Marshal(config)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

// Write success response.
writeSuccessResponseJSON(w, configData)
}

// SetRemoteTargetHandler - sets a remote target for bucket
func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetBucketTarget")

defer logger.AuditLog(w, r, "SetBucketTarget", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
update := r.URL.Query().Get("update") == "true"

if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

// Get current object layer instance.
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.SetBucketTargetAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}

cred, _, _, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
password := cred.SecretKey

reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
var target madmin.BucketTarget
if err = json.Unmarshal(reqBytes, &target); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}

sameTarget, _ := isLocalHost(target.URL().Hostname(), target.URL().Port(), globalMinioPort)
if sameTarget && bucket == target.TargetBucket {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBucketRemoteIdenticalToSource), r.URL)
return
}

target.SourceBucket = bucket
if !update {
target.Arn = globalBucketTargetSys.getRemoteARN(bucket, &target)
}
if target.Arn == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}

if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, update); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
tgtBytes, err := json.Marshal(&targets)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}

if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

data, err := json.Marshal(target.Arn)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, data)
}
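The sameTarget guard above prevents pointing a bucket's replication target back at itself. A standalone sketch of that rule follows, with a simplified stand-in for isLocalHost; the real helper also resolves hostnames against local interface addresses, which is omitted here:

package main

import (
"fmt"
"net/url"
)

// sameHostPort is a simplified stand-in for the isLocalHost helper:
// it only compares hostname and port textually, while the real code
// also resolves the hostname to local addresses.
func sameHostPort(u *url.URL, localHost, localPort string) bool {
return u.Hostname() == localHost && u.Port() == localPort
}

func main() {
endpoint, _ := url.Parse("http://127.0.0.1:9000")
sourceBucket, targetBucket := "photos", "photos"

// Mirrors the handler's rule: same endpoint and same bucket name
// means the target is identical to the source and is rejected.
if sameHostPort(endpoint, "127.0.0.1", "9000") && sourceBucket == targetBucket {
fmt.Println("rejected: remote target identical to source")
}
}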

// ListRemoteTargetsHandler - lists remote target(s) for a bucket or gets a target
// for a particular ARN type
func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListBucketTargets")

defer logger.AuditLog(w, r, "ListBucketTargets", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
arnType := vars["type"]
if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Get current object layer instance.
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketTargetAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if bucket != "" {
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if _, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
targets := globalBucketTargetSys.ListTargets(ctx, bucket, arnType)
data, err := json.Marshal(targets)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, data)
}

// RemoveRemoteTargetHandler - removes a remote target for bucket with specified ARN
func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveBucketTarget")

defer logger.AuditLog(w, r, "RemoveBucketTarget", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
arn := vars["arn"]

if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Get current object layer instance.
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.SetBucketTargetAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if err := globalBucketTargetSys.RemoveTarget(ctx, bucket, arn); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
tgtBytes, err := json.Marshal(&targets)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

// Write success response.
writeSuccessNoContent(w)
}
@@ -35,50 +35,47 @@ import (
"github.com/minio/minio/cmd/config/storageclass"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/madmin"
)

func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) ObjectLayer {
func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) (auth.Credentials, ObjectLayer) {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
objectAPI := newObjectLayerFn()
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return nil
return auth.Credentials{}, nil
}

// Validate request signature.
_, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
cred, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return nil
return cred, nil
}

return objectAPI
return cred, objectAPI
}
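The signature change above threads the requester's credentials through to the config handlers, so payloads are now encrypted and decrypted with the caller's own secret key (cred.SecretKey) rather than the root credential. A minimal round-trip sketch using the same madmin helpers that appear in this diff; the key and payload values are illustrative:

package main

import (
"bytes"
"fmt"

"github.com/minio/minio/pkg/madmin"
)

func main() {
// Stands in for cred.SecretKey, the requesting user's secret key.
password := "requester-secret-key"
payload := []byte("region name=us-east-1")

// Encrypt the config payload the way GetConfigKVHandler does.
enc, err := madmin.EncryptData(password, payload)
if err != nil {
panic(err)
}

// Decrypt it the way SetConfigKVHandler reads a request body.
dec, err := madmin.DecryptData(password, bytes.NewReader(enc))
if err != nil {
panic(err)
}
fmt.Println(string(dec)) // round-trips to the original payload
}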

// DelConfigKVHandler - DELETE /minio/admin/v2/del-config-kv
// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DelConfigKVHandler")
ctx := newContext(r, w, "DeleteConfigKV")

objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "DeleteConfigKV", mustGetClaimsFromToken(r))

cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}

// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}

if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}

password := globalActiveCred.SecretKey
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
@@ -103,28 +100,24 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
}
}

// SetConfigKVHandler - PUT /minio/admin/v2/set-config-kv
// SetConfigKVHandler - PUT /minio/admin/v3/set-config-kv
func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetConfigKVHandler")
ctx := newContext(r, w, "SetConfigKV")

objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "SetConfigKV", mustGetClaimsFromToken(r))

cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}

// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}

if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}

password := globalActiveCred.SecretKey
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
@@ -137,13 +130,13 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
dynamic, err := cfg.ReadConfig(bytes.NewReader(kvBytes))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

if err = validateConfig(cfg); err != nil {
if err = validateConfig(cfg, objectAPI.SetDriveCount()); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
@@ -162,23 +155,36 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ

// Make sure the config written to the backend is encrypted
if globalConfigEncrypted {
saveConfig(context.Background(), objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
}

// Apply dynamic values.
if err := applyDynamicConfig(GlobalContext, cfg); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
globalNotificationSys.SignalService(serviceReloadDynamic)

// If all values were dynamic, tell the client.
if dynamic {
w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
}
writeSuccessResponseHeadersOnly(w)
}
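When every updated key is dynamic, the handler advertises that fact in a response header so a client knows no restart is needed. A hedged client-side sketch: the madmin header constants are the ones set above, while the http.Response value is fabricated purely for illustration:

package main

import (
"fmt"
"net/http"

"github.com/minio/minio/pkg/madmin"
)

// restartNeeded inspects a set-config-kv response and reports whether
// the server must be restarted for the change to take effect.
func restartNeeded(resp *http.Response) bool {
// The handler sets this header only when all updated keys were
// applied dynamically, i.e. no restart is required.
return resp.Header.Get(madmin.ConfigAppliedHeader) != madmin.ConfigAppliedTrue
}

func main() {
resp := &http.Response{Header: http.Header{}}
resp.Header.Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
fmt.Println(restartNeeded(resp)) // false: change took effect live
}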

// GetConfigKVHandler - GET /minio/admin/v2/get-config-kv?key={key}
// GetConfigKVHandler - GET /minio/admin/v3/get-config-kv?key={key}
func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetConfigKVHandler")
ctx := newContext(r, w, "GetConfigKV")

objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "GetConfigKV", mustGetClaimsFromToken(r))

cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}

cfg := globalServerConfig
if globalSafeMode {
if newObjectLayerFn() == nil {
var err error
cfg, err = getValidConfig(objectAPI)
if err != nil {
@@ -195,7 +201,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
return
}

password := globalActiveCred.SecretKey
password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, buf.Bytes())
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -206,9 +212,11 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
}

func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ClearConfigHistoryKVHandler")
ctx := newContext(r, w, "ClearConfigHistoryKV")

objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "ClearConfigHistoryKV", mustGetClaimsFromToken(r))

_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
@@ -241,9 +249,11 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *

// RestoreConfigHistoryKVHandler - restores a config with KV settings for the given KV id.
func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RestoreConfigHistoryKVHandler")
ctx := newContext(r, w, "RestoreConfigHistoryKV")

objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "RestoreConfigHistoryKV", mustGetClaimsFromToken(r))

_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
@@ -267,12 +277,12 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
return
}

if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
if _, err = cfg.ReadConfig(bytes.NewReader(kvBytes)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

if err = validateConfig(cfg); err != nil {
if err = validateConfig(cfg, objectAPI.SetDriveCount()); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
@@ -287,9 +297,11 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r

// ListConfigHistoryKVHandler - lists all the KV ids.
func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListConfigHistoryKVHandler")
ctx := newContext(r, w, "ListConfigHistoryKV")

objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "ListConfigHistoryKV", mustGetClaimsFromToken(r))

cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
@@ -313,7 +325,7 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h
return
}

password := globalActiveCred.SecretKey
password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -323,11 +335,13 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h
writeSuccessResponseJSON(w, econfigData)
}

// HelpConfigKVHandler - GET /minio/admin/v2/help-config-kv?subSys={subSys}&key={key}
// HelpConfigKVHandler - GET /minio/admin/v3/help-config-kv?subSys={subSys}&key={key}
func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "HelpConfigKVHandler")
ctx := newContext(r, w, "HelpConfigKV")

objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "HelpHistoryKV", mustGetClaimsFromToken(r))

_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
@@ -349,28 +363,24 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req
w.(http.Flusher).Flush()
}

// SetConfigHandler - PUT /minio/admin/v2/config
// SetConfigHandler - PUT /minio/admin/v3/config
func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetConfigHandler")
ctx := newContext(r, w, "SetConfig")

objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "SetConfig", mustGetClaimsFromToken(r))

cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}

// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}

if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}

password := globalActiveCred.SecretKey
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
@@ -379,12 +389,12 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
}

cfg := newServerConfig()
if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
if _, err = cfg.ReadConfig(bytes.NewReader(kvBytes)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

if err = validateConfig(cfg); err != nil {
if err = validateConfig(cfg, objectAPI.SetDriveCount()); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
@@ -403,18 +413,20 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques

// Make sure the config written to the backend is encrypted
if globalConfigEncrypted {
saveConfig(context.Background(), objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
}

writeSuccessResponseHeadersOnly(w)
}

// GetConfigHandler - GET /minio/admin/v2/config
// GetConfigHandler - GET /minio/admin/v3/config
// Get config.json of this minio setup.
func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetConfigHandler")
ctx := newContext(r, w, "GetConfig")

objectAPI := validateAdminReqConfigKV(ctx, w, r)
defer logger.AuditLog(w, r, "GetConfig", mustGetClaimsFromToken(r))

cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
@@ -471,7 +483,7 @@ func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Reques
}
}

password := globalActiveCred.SecretKey
password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, []byte(s.String()))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

@@ -1,5 +1,5 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
* MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -35,8 +35,8 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
var adminAPIErr APIErrorCode

// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return nil, cred
}
@@ -51,21 +51,17 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
return objectAPI, cred
}

// RemoveUser - DELETE /minio/admin/v2/remove-user?accessKey=<access_key>
// RemoveUser - DELETE /minio/admin/v3/remove-user?accessKey=<access_key>
func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveUser")

defer logger.AuditLog(w, r, "RemoveUser", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeleteUserAdminAction)
if objectAPI == nil {
return
}

// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}

vars := mux.Vars(r)
accessKey := vars["accessKey"]

@@ -93,15 +89,19 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
}
}

// ListUsers - GET /minio/admin/v2/list-users
// ListUsers - GET /minio/admin/v3/list-users
func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListUsers")

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
defer logger.AuditLog(w, r, "ListUsers", mustGetClaimsFromToken(r))

objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
if objectAPI == nil {
return
}

password := cred.SecretKey

allCredentials, err := globalIAMSys.ListUsers()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -114,7 +114,6 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
return
}

password := globalActiveCred.SecretKey
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -124,18 +123,47 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseJSON(w, econfigData)
}

// GetUserInfo - GET /minio/admin/v2/user-info
// GetUserInfo - GET /minio/admin/v3/user-info
func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetUserInfo")

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetUserAdminAction)
if objectAPI == nil {
return
}
defer logger.AuditLog(w, r, "GetUserInfo", mustGetClaimsFromToken(r))

vars := mux.Vars(r)
name := vars["accessKey"]

// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}

accessKey := cred.AccessKey
if cred.ParentUser != "" {
accessKey = cred.ParentUser
}

implicitPerm := name == accessKey
if !implicitPerm {
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Action: iampolicy.GetUserAdminAction,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}

userInfo, err := globalIAMSys.GetUserInfo(name)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -151,10 +179,12 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseJSON(w, data)
}
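The implicitPerm check above means an authenticated user can always query its own account, while lookups of other accounts must pass the GetUserAdminAction policy check. A standalone sketch of that decision, with a plain boolean standing in for the globalIAMSys.IsAllowed call:

package main

import "fmt"

// canGetUserInfo mirrors the handler's rule: a user may always query
// itself ("implicit permission"); anything else requires the
// GetUserAdminAction policy, stubbed here by allowedByPolicy.
func canGetUserInfo(requester, target string, allowedByPolicy bool) bool {
if requester == target {
return true // implicit permission on one's own account
}
return allowedByPolicy
}

func main() {
fmt.Println(canGetUserInfo("alice", "alice", false)) // true
fmt.Println(canGetUserInfo("alice", "bob", false))   // false
fmt.Println(canGetUserInfo("admin", "bob", true))    // true
}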

// UpdateGroupMembers - PUT /minio/admin/v2/update-group-members
// UpdateGroupMembers - PUT /minio/admin/v3/update-group-members
func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "UpdateGroupMembers")

defer logger.AuditLog(w, r, "UpdateGroupMembers", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AddUserToGroupAdminAction)
if objectAPI == nil {
return
@@ -194,10 +224,12 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
}
}

// GetGroup - /minio/admin/v2/group?group=mygroup1
// GetGroup - /minio/admin/v3/group?group=mygroup1
func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetGroup")

defer logger.AuditLog(w, r, "GetGroup", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetGroupAdminAction)
if objectAPI == nil {
return
@@ -221,10 +253,12 @@ func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseJSON(w, body)
}

// ListGroups - GET /minio/admin/v2/groups
// ListGroups - GET /minio/admin/v3/groups
func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListGroups")

defer logger.AuditLog(w, r, "ListGroups", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListGroupsAdminAction)
if objectAPI == nil {
return
@@ -245,10 +279,12 @@ func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseJSON(w, body)
}

// SetGroupStatus - PUT /minio/admin/v2/set-group-status?group=mygroup1&status=enabled
// SetGroupStatus - PUT /minio/admin/v3/set-group-status?group=mygroup1&status=enabled
func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetGroupStatus")

defer logger.AuditLog(w, r, "SetGroupStatus", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableGroupAdminAction)
if objectAPI == nil {
return
@@ -280,26 +316,22 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
}
}

// SetUserStatus - PUT /minio/admin/v2/set-user-status?accessKey=<access_key>&status=[enabled|disabled]
// SetUserStatus - PUT /minio/admin/v3/set-user-status?accessKey=<access_key>&status=[enabled|disabled]
func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetUserStatus")

defer logger.AuditLog(w, r, "SetUserStatus", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableUserAdminAction)
if objectAPI == nil {
return
}

// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}

vars := mux.Vars(r)
accessKey := vars["accessKey"]
status := vars["status"]

// Custom IAM policies not allowed for admin user.
// This API is not allowed to lookup accessKey user status
if accessKey == globalActiveCred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
@@ -319,30 +351,53 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
}
}

// AddUser - PUT /minio/admin/v2/add-user?accessKey=<access_key>
// AddUser - PUT /minio/admin/v3/add-user?accessKey=<access_key>
func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddUser")

objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.CreateUserAdminAction)
if objectAPI == nil {
return
}

// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
defer logger.AuditLog(w, r, "AddUser", mustGetClaimsFromToken(r))

vars := mux.Vars(r)
accessKey := vars["accessKey"]

// Custom IAM policies not allowed for admin user.
if accessKey == globalActiveCred.AccessKey {
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}

if cred.IsTemp() || cred.IsServiceAccount() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccountNotEligible), r.URL)
return
}

// Not allowed to add a user with same access key as root credential
if owner && accessKey == cred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
return
}

implicitPerm := accessKey == cred.AccessKey
if !implicitPerm {
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.CreateUserAdminAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}

if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
@@ -378,16 +433,331 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
}
}

// InfoCannedPolicy - GET /minio/admin/v2/info-canned-policy?name={policyName}
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicy")
// AddServiceAccount - PUT /minio/admin/v3/add-service-account
func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddServiceAccount")

defer logger.AuditLog(w, r, "AddServiceAccount", mustGetClaimsFromToken(r))

// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}

// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}

password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}

var createReq madmin.AddServiceAccountReq
if err = json.Unmarshal(reqBytes, &createReq); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}

parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}

newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, createReq.Policy)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

// Notify all other MinIO peers to reload the new service account
for _, nerr := range globalNotificationSys.LoadServiceAccount(newCred.AccessKey) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}

var createResp = madmin.AddServiceAccountResp{
Credentials: auth.Credentials{
AccessKey: newCred.AccessKey,
SecretKey: newCred.SecretKey,
},
}

data, err := json.Marshal(createResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

encryptedData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

writeSuccessResponseJSON(w, encryptedData)
}
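From the client side, the add-service-account request is the JSON encoding of madmin.AddServiceAccountReq, encrypted with the caller's secret key, mirroring the DecryptData call in the handler above. A minimal sketch under those assumptions; the zero-value request, which presumably inherits the parent user's policy, is an assumption here:

package main

import (
"encoding/json"
"fmt"

"github.com/minio/minio/pkg/madmin"
)

func main() {
// Illustrative placeholder for the requesting user's secret key.
secretKey := "caller-secret-key"

// Zero-value request: assumed to mean "inherit the parent
// user's policy" (the handler passes createReq.Policy through
// to NewServiceAccount unchanged).
var req madmin.AddServiceAccountReq
body, err := json.Marshal(req)
if err != nil {
panic(err)
}

// Encrypt the body the way the handler expects to decrypt it.
encrypted, err := madmin.EncryptData(secretKey, body)
if err != nil {
panic(err)
}
fmt.Printf("encrypted request body: %d bytes\n", len(encrypted))
}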

// ListServiceAccounts - GET /minio/admin/v3/list-service-accounts
func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListServiceAccounts")

defer logger.AuditLog(w, r, "ListServiceAccounts", mustGetClaimsFromToken(r))

// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}

// Disallow listing service accounts by the root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}

parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}

serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, parentUser)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

var listResp = madmin.ListServiceAccountsResp{
Accounts: serviceAccounts,
}

data, err := json.Marshal(listResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

writeSuccessResponseJSON(w, encryptedData)
}

// DeleteServiceAccount - DELETE /minio/admin/v3/delete-service-account
func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteServiceAccount")

defer logger.AuditLog(w, r, "DeleteServiceAccount", mustGetClaimsFromToken(r))

// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}

// Disallow deleting service accounts by the root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}

serviceAccount := mux.Vars(r)["accessKey"]
if serviceAccount == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
return
}

user, err := globalIAMSys.GetServiceAccountParent(ctx, serviceAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}

if parentUser != user || user == "" {
// Either the service account belongs to another user or it
// does not exist; return a not-found error in both cases to
// mitigate brute force attacks.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServiceAccountNotFound), r.URL)
return
}

err = globalIAMSys.DeleteServiceAccount(ctx, serviceAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

writeSuccessNoContent(w)
}

// AccountInfoHandler returns usage
func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AccountInfo")

defer logger.AuditLog(w, r, "AccountInfo", mustGetClaimsFromToken(r))

// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}

// Set prefix value for "s3:prefix" policy conditionals.
r.Header.Set("prefix", "")

// Set delimiter value for "s3:delimiter" policy conditionals.
r.Header.Set("delimiter", SlashSeparator)

isAllowedAccess := func(bucketName string) (rd, wr bool) {
// Use the following trick to filter in place
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
rd = true
}

if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.PutObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
wr = true
}

return rd, wr
}

buckets, err := objectAPI.ListBuckets(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

// Load the latest calculated data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil {
// log the error, continue with the accounting response
logger.LogIf(ctx, err)
}

accountName := cred.AccessKey
if cred.ParentUser != "" {
accountName = cred.ParentUser
}

policies, err := globalIAMSys.PolicyDBGet(accountName, false)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

acctInfo := madmin.AccountInfo{
AccountName: accountName,
Policy: globalIAMSys.GetCombinedPolicy(policies...),
}

for _, bucket := range buckets {
rd, wr := isAllowedAccess(bucket.Name)
if rd || wr {
var size uint64
// Fetch the data usage of the current bucket
if !dataUsageInfo.LastUpdate.IsZero() {
size = dataUsageInfo.BucketsUsage[bucket.Name].Size
}
acctInfo.Buckets = append(acctInfo.Buckets, madmin.BucketUsageInfo{
Name: bucket.Name,
Created: bucket.Created,
Size: size,
Access: madmin.AccountAccess{
Read: rd,
Write: wr,
},
})
}
}

usageInfoJSON, err := json.Marshal(acctInfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

writeSuccessResponseJSON(w, usageInfoJSON)
}
|
||||
|
||||
// InfoCannedPolicyV2 - GET /minio/admin/v2/info-canned-policy?name={policyName}
|
||||
func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "InfoCannedPolicyV2")
|
||||
|
||||
defer logger.AuditLog(w, r, "InfoCannedPolicyV2", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
data, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
|
||||
policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
data, err := json.Marshal(policy)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
@@ -397,9 +767,35 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// ListCannedPolicies - GET /minio/admin/v2/list-canned-policies
|
||||
func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListCannedPolicies")
|
||||
// InfoCannedPolicy - GET /minio/admin/v3/info-canned-policy?name={policyName}
|
||||
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "InfoCannedPolicy")
|
||||
|
||||
defer logger.AuditLog(w, r, "InfoCannedPolicy", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = json.NewEncoder(w).Encode(policy); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
w.(http.Flusher).Flush()
|
||||
}

// ListCannedPoliciesV2 - GET /minio/admin/v2/list-canned-policies
func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPoliciesV2")

defer logger.AuditLog(w, r, "ListCannedPoliciesV2", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
@@ -412,7 +808,16 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
return
}

if err = json.NewEncoder(w).Encode(policies); err != nil {
policyMap := make(map[string][]byte, len(policies))
for k, p := range policies {
var err error
policyMap[k], err = json.Marshal(p)
if err != nil {
logger.LogIf(ctx, err)
continue
}
}
if err = json.NewEncoder(w).Encode(policyMap); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -420,10 +825,46 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
w.(http.Flusher).Flush()
}

// RemoveCannedPolicy - DELETE /minio/admin/v2/remove-canned-policy?name=<policy_name>
// ListCannedPolicies - GET /minio/admin/v3/list-canned-policies
func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPolicies")

defer logger.AuditLog(w, r, "ListCannedPolicies", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
}

policies, err := globalIAMSys.ListPolicies()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

var newPolicies = make(map[string]iampolicy.Policy)
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {
logger.LogIf(ctx, err)
continue
}
newPolicies[name] = p
}
if err = json.NewEncoder(w).Encode(newPolicies); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

w.(http.Flusher).Flush()
}
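// --- Editor's note (not part of the diff): the V2 handler above responds
// with a map[string][]byte; encoding/json renders a []byte value as a
// base64 string, whereas json.RawMessage embeds pre-marshaled policy JSON
// verbatim. A minimal illustration of the difference:
//
//	raw := json.RawMessage(`{"Version":"2012-10-17"}`)
//	a, _ := json.Marshal(map[string]json.RawMessage{"readonly": raw})
//	// a == `{"readonly":{"Version":"2012-10-17"}}`
//
//	b, _ := json.Marshal(map[string][]byte{"readonly": []byte("{}")})
//	// b == `{"readonly":"e30="}` (base64 of "{}")
// --- end note ---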

// RemoveCannedPolicy - DELETE /minio/admin/v3/remove-canned-policy?name=<policy_name>
func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveCannedPolicy")

defer logger.AuditLog(w, r, "RemoveCannedPolicy", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeletePolicyAdminAction)
if objectAPI == nil {
return
@@ -432,12 +873,6 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
vars := mux.Vars(r)
policyName := vars["name"]

// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}

if err := globalIAMSys.DeletePolicy(policyName); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -452,10 +887,12 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
}
}

// AddCannedPolicy - PUT /minio/admin/v2/add-canned-policy?name=<policy_name>
// AddCannedPolicy - PUT /minio/admin/v3/add-canned-policy?name=<policy_name>
func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddCannedPolicy")

defer logger.AuditLog(w, r, "AddCannedPolicy", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.CreatePolicyAdminAction)
if objectAPI == nil {
return
@@ -464,12 +901,6 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
vars := mux.Vars(r)
policyName := vars["name"]

// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}

// Error out if Content-Length is missing.
if r.ContentLength <= 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
@@ -508,10 +939,12 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
}
}

// SetPolicyForUserOrGroup - PUT /minio/admin/v2/set-policy?policy=xxx&user-or-group=?[&is-group]
// SetPolicyForUserOrGroup - PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetPolicyForUserOrGroup")

defer logger.AuditLog(w, r, "SetPolicyForUserOrGroup", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AttachPolicyAdminAction)
if objectAPI == nil {
return
@@ -522,15 +955,9 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
entityName := vars["userOrGroup"]
isGroup := vars["isGroup"] == "true"

// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}

if !isGroup {
ok, err := globalIAMSys.IsTempUser(entityName)
if err != nil {
if err != nil && err != errNoSuchUser {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

(File diff suppressed because it is too large)
@@ -33,26 +33,27 @@ import (
"github.com/minio/minio/pkg/madmin"
)

// adminXLTestBed - encapsulates subsystems that need to be setup for
// adminErasureTestBed - encapsulates subsystems that need to be setup for
// admin-handler unit tests.
type adminXLTestBed struct {
xlDirs []string
objLayer ObjectLayer
router *mux.Router
type adminErasureTestBed struct {
erasureDirs []string
objLayer ObjectLayer
router *mux.Router
}

// prepareAdminXLTestBed - helper function that sets up a single-node
// XL backend for admin-handler tests.
func prepareAdminXLTestBed() (*adminXLTestBed, error) {
// prepareAdminErasureTestBed - helper function that sets up a single-node
// Erasure backend for admin-handler tests.
func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, error) {

// reset global variables to start afresh.
resetTestGlobals()

// Set globalIsXL to indicate that the setup uses an erasure
// Set globalIsErasure to indicate that the setup uses an erasure
// code backend.
globalIsXL = true
globalIsErasure = true

// Initializing objectLayer for HealFormatHandler.
objLayer, xlDirs, xlErr := initTestXLObjLayer()
objLayer, erasureDirs, xlErr := initTestErasureObjLayer(ctx)
if xlErr != nil {
return nil, xlErr
}
@@ -65,58 +66,47 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
// Initialize boot time
globalBootTime = UTCNow()

globalEndpoints = mustGetZoneEndpoints(xlDirs...)
globalEndpoints = mustGetZoneEndpoints(erasureDirs...)

globalConfigSys = NewConfigSys()
newAllSubsystems()

globalIAMSys = NewIAMSys()
globalIAMSys.Init(objLayer)

buckets, err := objLayer.ListBuckets(context.Background())
if err != nil {
return nil, err
}

globalPolicySys = NewPolicySys()
globalPolicySys.Init(buckets, objLayer)

globalNotificationSys = NewNotificationSys(globalEndpoints)
globalNotificationSys.Init(buckets, objLayer)
initAllSubsystems(ctx, objLayer)

// Setup admin mgmt REST API handlers.
adminRouter := mux.NewRouter()
registerAdminRouter(adminRouter, true, true)

return &adminXLTestBed{
xlDirs: xlDirs,
objLayer: objLayer,
router: adminRouter,
return &adminErasureTestBed{
erasureDirs: erasureDirs,
objLayer: objLayer,
router: adminRouter,
}, nil
}

// TearDown - method that resets the test bed for subsequent unit
// tests to start afresh.
func (atb *adminXLTestBed) TearDown() {
removeRoots(atb.xlDirs)
func (atb *adminErasureTestBed) TearDown() {
removeRoots(atb.erasureDirs)
resetTestGlobals()
}

// initTestObjLayer - Helper function to initialize an XL-based object
// initTestObjLayer - Helper function to initialize an Erasure-based object
// layer and set globalObjectAPI.
func initTestXLObjLayer() (ObjectLayer, []string, error) {
xlDirs, err := getRandomDisks(16)
func initTestErasureObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
erasureDirs, err := getRandomDisks(16)
if err != nil {
return nil, nil, err
}
endpoints := mustGetNewEndpoints(xlDirs...)
format, err := waitForFormatXL(true, endpoints, 1, 1, 16, "")
endpoints := mustGetNewEndpoints(erasureDirs...)
storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "")
if err != nil {
removeRoots(xlDirs)
removeRoots(erasureDirs)
return nil, nil, err
}

globalPolicySys = NewPolicySys()
objLayer, err := newXLSets(endpoints, format, 1, 16)
objLayer := &erasureServerPools{serverPools: make([]*erasureSets, 1)}
objLayer.serverPools[0], err = newErasureSets(ctx, endpoints, storageDisks, format)
if err != nil {
return nil, nil, err
}
@@ -125,7 +115,7 @@ func initTestXLObjLayer() (ObjectLayer, []string, error) {
globalObjLayerMutex.Lock()
globalObjectAPI = objLayer
globalObjLayerMutex.Unlock()
return objLayer, xlDirs, nil
return objLayer, erasureDirs, nil
}
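// --- Editor's sketch (not part of the diff): the test-bed helpers now take
// a context.Context so goroutines started during setup exit when the test
// finishes. The same shape with only the standard library; prepareTestBed is
// a hypothetical stand-in for prepareAdminErasureTestBed:
//
//	func TestSomething(t *testing.T) {
//		ctx, cancel := context.WithCancel(context.Background())
//		defer cancel() // stops anything selecting on ctx.Done()
//
//		bed, err := prepareTestBed(ctx)
//		if err != nil {
//			t.Fatal(err)
//		}
//		defer bed.TearDown()
//	}
// --- end sketch ---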

// cmdType - Represents different service subcommands like status, stop
@@ -191,9 +181,12 @@ func getServiceCmdRequest(cmd cmdType, cred auth.Credentials) (*http.Request, er
// testServicesCmdHandler - parametrizes service subcommand tests on
// cmdType value.
func testServicesCmdHandler(cmd cmdType, t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

adminTestBed, err := prepareAdminErasureTestBed(ctx)
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.")
}
defer adminTestBed.TearDown()

@@ -259,9 +252,12 @@ func buildAdminRequest(queryVal url.Values, method, path string,
}

func TestAdminServerInfo(t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

adminTestBed, err := prepareAdminErasureTestBed(ctx)
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.")
}

defer adminTestBed.TearDown()
@@ -303,7 +299,7 @@ func TestToAdminAPIErrCode(t *testing.T) {
}{
// 1. Server not in quorum.
{
err: errXLWriteQuorum,
err: errErasureWriteQuorum,
expectedAPIErr: ErrAdminConfigNoQuorum,
},
// 2. No error.
@@ -314,12 +310,12 @@ func TestToAdminAPIErrCode(t *testing.T) {
// 3. Non-admin API specific error.
{
err: errDiskNotFound,
expectedAPIErr: toAPIErrorCode(context.Background(), errDiskNotFound),
expectedAPIErr: toAPIErrorCode(GlobalContext, errDiskNotFound),
},
}

for i, test := range testCases {
actualErr := toAdminAPIErrCode(context.Background(), test.err)
actualErr := toAdminAPIErrCode(GlobalContext, test.err)
if actualErr != test.expectedAPIErr {
t.Errorf("Test %d: Expected %v but received %v",
i+1, test.expectedAPIErr, actualErr)
@@ -331,13 +327,13 @@ func TestExtractHealInitParams(t *testing.T) {
mkParams := func(clientToken string, forceStart, forceStop bool) url.Values {
v := url.Values{}
if clientToken != "" {
v.Add(string(mgmtClientToken), clientToken)
v.Add(mgmtClientToken, clientToken)
}
if forceStart {
v.Add(string(mgmtForceStart), "")
v.Add(mgmtForceStart, "")
}
if forceStop {
v.Add(string(mgmtForceStop), "")
v.Add(mgmtForceStop, "")
}
return v
}
@@ -355,11 +351,11 @@ func TestExtractHealInitParams(t *testing.T) {
}
varsArr := []map[string]string{
// Invalid cases
{string(mgmtPrefix): "objprefix"},
{mgmtPrefix: "objprefix"},
// Valid cases
{},
{string(mgmtBucket): "bucket"},
{string(mgmtBucket): "bucket", string(mgmtPrefix): "objprefix"},
{mgmtBucket: "bucket"},
{mgmtBucket: "bucket", mgmtPrefix: "objprefix"},
}

// Body is always valid - we do not test JSON decoding.

@@ -21,6 +21,7 @@ import (
"encoding/json"
"fmt"
"net/http"
"sort"
"strings"
"sync"
"time"
@@ -60,12 +61,11 @@
)

var (
errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long")
errHealPushStopNDiscard = fmt.Errorf("heal push stopped due to heal stop signal")
errHealStopSignalled = fmt.Errorf("heal stop signaled")
errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long")
errHealStopSignalled = fmt.Errorf("heal stop signaled")

errFnHealFromAPIErr = func(ctx context.Context, err error) error {
apiErr := toAPIError(ctx, err)
apiErr := toAdminAPIErr(ctx, err)
return fmt.Errorf("Heal internal error: %s: %s",
apiErr.Code, apiErr.Description)
}
@@ -73,18 +73,11 @@ var (

// healSequenceStatus - accumulated status of the heal sequence
type healSequenceStatus struct {
// lock to update this structure as it is concurrently
// accessed
updateLock *sync.RWMutex

// summary and detail for failures
Summary healStatusSummary `json:"Summary"`
FailureDetail string `json:"Detail,omitempty"`
StartTime time.Time `json:"StartTime"`

// disk information
NumDisks int `json:"NumDisks"`

// settings for the heal sequence
HealSettings madmin.HealOpts `json:"Settings"`

@@ -94,31 +87,67 @@ type healSequenceStatus struct {

// structure to hold state of all heal sequences in server memory
type allHealState struct {
sync.Mutex
sync.RWMutex

// map of heal path to heal sequence
healSeqMap map[string]*healSequence
healSeqMap map[string]*healSequence
healLocalDisks map[Endpoint]struct{}
}

// initHealState - initialize healing apparatus
func initHealState() *allHealState {
healState := &allHealState{
healSeqMap: make(map[string]*healSequence),
// newHealState - initialize global heal state management
func newHealState(cleanup bool) *allHealState {
hstate := &allHealState{
healSeqMap: make(map[string]*healSequence),
healLocalDisks: map[Endpoint]struct{}{},
}

go healState.periodicHealSeqsClean()

return healState
if cleanup {
go hstate.periodicHealSeqsClean(GlobalContext)
}
return hstate
}

func (ahs *allHealState) periodicHealSeqsClean() {
func (ahs *allHealState) healDriveCount() int {
ahs.RLock()
defer ahs.RUnlock()

return len(ahs.healLocalDisks)
}

func (ahs *allHealState) getHealLocalDisks() Endpoints {
ahs.RLock()
defer ahs.RUnlock()

var endpoints Endpoints
for ep := range ahs.healLocalDisks {
endpoints = append(endpoints, ep)
}
return endpoints
}

func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()

for _, ep := range healLocalDisks {
delete(ahs.healLocalDisks, ep)
}
}

func (ahs *allHealState) pushHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()

for _, ep := range healLocalDisks {
ahs.healLocalDisks[ep] = struct{}{}
}
}
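// --- Editor's note (not part of the diff): healLocalDisks uses the
// idiomatic Go set encoding, map[Endpoint]struct{}, guarded by the
// now-embedded sync.RWMutex: struct{} occupies no storage and membership is
// a plain map lookup. The same shape in miniature (stringSet is an
// illustrative name):
//
//	type stringSet struct {
//		sync.RWMutex
//		m map[string]struct{}
//	}
//
//	func (s *stringSet) add(k string) { s.Lock(); s.m[k] = struct{}{}; s.Unlock() }
//	func (s *stringSet) del(k string) { s.Lock(); delete(s.m, k); s.Unlock() }
//	func (s *stringSet) size() int    { s.RLock(); defer s.RUnlock(); return len(s.m) }
// --- end note ---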

func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
// Launch clean-up routine to remove this heal sequence (after
// it ends) from the global state after timeout has elapsed.
ticker := time.NewTicker(time.Minute * 5)
defer ticker.Stop()
for {
select {
case <-ticker.C:
case <-time.After(time.Minute * 5):
now := UTCNow()
ahs.Lock()
for path, h := range ahs.healSeqMap {
@@ -127,7 +156,7 @@ func (ahs *allHealState) periodicHealSeqsClean() {
}
}
ahs.Unlock()
case <-GlobalServiceDoneCh:
case <-ctx.Done():
// server could be restarting - need
// to exit immediately
return
@@ -162,12 +191,17 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
he, exists := ahs.getHealSequence(path)
if !exists {
hsp = madmin.HealStopSuccess{
ClientToken: "invalid",
ClientToken: "unknown",
StartTime: UTCNow(),
}
} else {
clientToken := he.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s@%d", he.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
}

hsp = madmin.HealStopSuccess{
ClientToken: he.clientToken,
ClientToken: clientToken,
ClientAddress: he.clientAddress,
StartTime: he.startTime,
}
@@ -183,7 +217,7 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
}

b, err := json.Marshal(&hsp)
return b, toAdminAPIErr(context.Background(), err)
return b, toAdminAPIErr(GlobalContext, err)
}

// LaunchNewHealSequence - launches a background routine that performs
@@ -196,26 +230,17 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
// `keepHealSeqStateDuration`. This function also launches a
// background routine to clean up heal results after the
// aforementioned duration.
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLayer) (
respBytes []byte, apiErr APIError, errMsg string) {

existsAndLive := false
he, exists := ahs.getHealSequence(h.path)
if exists {
if !he.hasEnded() || len(he.currentStatus.Items) > 0 {
existsAndLive = true
if h.forceStarted {
_, apiErr = ahs.stopHealSequence(pathJoin(h.bucket, h.object))
if apiErr.Code != "" {
return respBytes, apiErr, ""
}
}

if existsAndLive {
// A heal sequence exists on the given path.
if h.forceStarted {
// stop the running heal sequence - wait for it to finish.
he.stop()
for !he.hasEnded() {
time.Sleep(1 * time.Second)
}
} else {
} else {
oh, exists := ahs.getHealSequence(pathJoin(h.bucket, h.object))
if exists && !oh.hasEnded() {
errMsg = "Heal is already running on the given path " +
"(use force-start option to stop and start afresh). " +
fmt.Sprintf("The heal was started by IP %s at %s, token is %s",
@@ -229,10 +254,9 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (

// Check if new heal sequence to be started overlaps with any
// existing, running sequence
hpath := pathJoin(h.bucket, h.object)
for k, hSeq := range ahs.healSeqMap {
if !hSeq.hasEnded() && (strings.HasPrefix(k, h.path) ||
strings.HasPrefix(h.path, k)) {

if !hSeq.hasEnded() && (HasPrefix(k, hpath) || HasPrefix(hpath, k)) {
errMsg = "The provided heal sequence path overlaps with an existing " +
fmt.Sprintf("heal path: %s", k)
return nil, errorCodes.ToAPIErr(ErrHealOverlappingPaths), errMsg
@@ -240,19 +264,24 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
}

// Add heal state and start sequence
ahs.healSeqMap[h.path] = h
ahs.healSeqMap[hpath] = h

// Launch top-level background heal go-routine
go h.healSequenceStart()
go h.healSequenceStart(objAPI)

clientToken := h.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s@%d", h.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
}

b, err := json.Marshal(madmin.HealStartSuccess{
ClientToken: h.clientToken,
ClientToken: clientToken,
ClientAddress: h.clientAddress,
StartTime: h.startTime,
})
if err != nil {
logger.LogIf(h.ctx, err)
return nil, toAPIError(h.ctx, err), ""
return nil, toAdminAPIErr(h.ctx, err), ""
}
return b, noError, ""
}
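// --- Editor's note (not part of the diff): in distributed erasure mode the
// client token returned above is suffixed with the local proxy endpoint
// index ("<uuid>@<n>"), so follow-up status calls can be routed back to the
// node that owns the sequence. A sketch of splitting such a token apart; the
// helper name is an assumption:
//
//	func splitHealToken(tok string) (token string, idx int, ok bool) {
//		i := strings.LastIndexByte(tok, '@')
//		if i < 0 {
//			return tok, 0, false
//		}
//		n, err := strconv.Atoi(tok[i+1:])
//		if err != nil {
//			return tok, 0, false
//		}
//		return tok[:i], n, true
//	}
// --- end note ---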
@@ -261,14 +290,17 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
// status results from global state and returns its JSON
// representation. The clientToken helps ensure there aren't
// conflicting clients fetching status.
func (ahs *allHealState) PopHealStatusJSON(path string,
func (ahs *allHealState) PopHealStatusJSON(hpath string,
clientToken string) ([]byte, APIErrorCode) {

// fetch heal state for given path
h, exists := ahs.getHealSequence(path)
h, exists := ahs.getHealSequence(hpath)
if !exists {
// If there is no such heal sequence, return error.
return nil, ErrHealNoSuchProcess
// heal sequence doesn't exist, must have finished.
jbytes, err := json.Marshal(healSequenceStatus{
Summary: healFinishedStatus,
})
return jbytes, toAdminAPIErrCode(GlobalContext, err)
}

// Check if client-token is valid
@@ -277,8 +309,8 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
}

// Take lock to access and update the heal-sequence
h.currentStatus.updateLock.Lock()
defer h.currentStatus.updateLock.Unlock()
h.mutex.Lock()
defer h.mutex.Unlock()

numItems := len(h.currentStatus.Items)

@@ -289,38 +321,42 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
lastResultIndex = h.currentStatus.Items[numItems-1].ResultIndex
}

// After sending status to client, and before relinquishing
// the updateLock, reset Item to nil and record the result
// index sent to the client.
defer func(i int64) {
h.lastSentResultIndex = i
h.currentStatus.Items = nil
}(lastResultIndex)
h.lastSentResultIndex = lastResultIndex

jbytes, err := json.Marshal(h.currentStatus)
if err != nil {
h.currentStatus.Items = nil

logger.LogIf(h.ctx, err)
return nil, ErrInternalError
}

h.currentStatus.Items = nil

return jbytes, ErrNone
}

// healSource denotes single entity and heal option.
type healSource struct {
bucket string
object string
versionID string
opts *madmin.HealOpts // optional heal option overrides default setting
}

// healSequence - state for each heal sequence initiated on the
// server.
type healSequence struct {
// bucket, and prefix on which heal seq. was initiated
bucket, objPrefix string
// bucket, and object on which heal seq. was initiated
bucket, object string

// path is just pathJoin(bucket, objPrefix)
path string
// A channel of entities (format, buckets, objects) to heal
sourceCh chan healSource

// List of entities (format, buckets, objects) to heal
sourceCh chan string
// A channel of entities with heal result
respCh chan healResult

// Report healing progress, false if this is a background
// healing since currently there is no entity which will
// receive realtime healing status
// Report healing progress
reportProgress bool

// time at which heal sequence was started
@@ -345,59 +381,141 @@ type healSequence struct {
// completed
traverseAndHealDoneCh chan error

// channel to signal heal sequence to stop (e.g. from the
// heal-stop API)
stopSignalCh chan struct{}
// canceler to cancel heal sequence.
cancelCtx context.CancelFunc

// the last result index sent to client
lastSentResultIndex int64

// Number of total items scanned
scannedItemsCount int64
// Number of total items scanned against item type
scannedItemsMap map[madmin.HealItemType]int64

// Number of total items healed against item type
healedItemsMap map[madmin.HealItemType]int64

// Number of total items where healing failed against endpoint and drive state
healFailedItemsMap map[string]int64

// The time of the last scan/heal activity
lastHealActivity time.Time

// Holds the request-info for logging
ctx context.Context

// used to lock this structure as it is concurrently accessed
mutex sync.RWMutex
}

// NewHealSequence - creates healSettings, assumes bucket and
// objPrefix are already validated.
func newHealSequence(bucket, objPrefix, clientAddr string,
numDisks int, hs madmin.HealOpts, forceStart bool) *healSequence {
func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
hs madmin.HealOpts, forceStart bool) *healSequence {

reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
reqInfo.AppendTags("prefix", objPrefix)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
ctx, cancel := context.WithCancel(logger.SetReqInfo(ctx, reqInfo))

clientToken := mustGetUUID()

return &healSequence{
respCh: make(chan healResult),
bucket: bucket,
objPrefix: objPrefix,
path: pathJoin(bucket, objPrefix),
object: objPrefix,
reportProgress: true,
startTime: UTCNow(),
clientToken: mustGetUUID(),
clientToken: clientToken,
clientAddress: clientAddr,
forceStarted: forceStart,
settings: hs,
currentStatus: healSequenceStatus{
Summary: healNotStartedStatus,
HealSettings: hs,
NumDisks: numDisks,
updateLock: &sync.RWMutex{},
},
traverseAndHealDoneCh: make(chan error),
stopSignalCh: make(chan struct{}),
cancelCtx: cancel,
ctx: ctx,
scannedItemsMap: make(map[madmin.HealItemType]int64),
healedItemsMap: make(map[madmin.HealItemType]int64),
healFailedItemsMap: make(map[string]int64),
}
}

// resetHealStatusCounters - reset the healSequence status counters between
// each monthly background heal scanning activity.
// This is used only in case of Background healing scenario, where
// we use a single long running healSequence which reactively heals
// objects passed to the SourceCh.
func (h *healSequence) resetHealStatusCounters() {
h.mutex.Lock()
defer h.mutex.Unlock()

h.currentStatus.Items = []madmin.HealResultItem{}
h.lastSentResultIndex = 0
h.scannedItemsMap = make(map[madmin.HealItemType]int64)
h.healedItemsMap = make(map[madmin.HealItemType]int64)
h.healFailedItemsMap = make(map[string]int64)
}

// getScannedItemsCount - returns a count of all scanned items
func (h *healSequence) getScannedItemsCount() int64 {
var count int64
h.mutex.RLock()
defer h.mutex.RUnlock()

for _, v := range h.scannedItemsMap {
count = count + v
}
return count
}

// getScannedItemsMap - returns map of all scanned items against type
func (h *healSequence) getScannedItemsMap() map[madmin.HealItemType]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()

// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.scannedItemsMap))
for k, v := range h.scannedItemsMap {
retMap[k] = v
}

return retMap
}

// getHealedItemsMap - returns the map of all healed items against type
func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()

// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.healedItemsMap))
for k, v := range h.healedItemsMap {
retMap[k] = v
}

return retMap
}

// gethealFailedItemsMap - returns map of all items where heal failed against
// drive endpoint and status
func (h *healSequence) gethealFailedItemsMap() map[string]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()

// Make a copy before returning the value
retMap := make(map[string]int64, len(h.healFailedItemsMap))
for k, v := range h.healFailedItemsMap {
retMap[k] = v
}

return retMap
}

// isQuitting - determines if the heal sequence is quitting (due to an
// external signal)
func (h *healSequence) isQuitting() bool {
select {
case <-h.stopSignalCh:
case <-h.ctx.Done():
return true
default:
return false
@@ -406,19 +524,18 @@ func (h *healSequence) isQuitting() bool {

// check if the heal sequence has ended
func (h *healSequence) hasEnded() bool {
h.currentStatus.updateLock.RLock()
summary := h.currentStatus.Summary
h.currentStatus.updateLock.RUnlock()
return summary == healStoppedStatus || summary == healFinishedStatus
h.mutex.RLock()
defer h.mutex.RUnlock()
// background heal never ends
if h.clientToken == bgHealingUUID {
return false
}
return !h.endTime.IsZero()
}
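// --- Editor's note (not part of the diff): hasEnded now derives completion
// from endTime instead of comparing status enums: time.Time's zero value
// doubles as a "not finished yet" sentinel via IsZero. For example:
//
//	var end time.Time
//	fmt.Println(end.IsZero()) // true  -> still running
//	end = time.Now()
//	fmt.Println(end.IsZero()) // false -> finished
// --- end note ---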

// stops the heal sequence - safe to call multiple times.
func (h *healSequence) stop() {
select {
case <-h.stopSignalCh:
default:
close(h.stopSignalCh)
}
h.cancelCtx()
}
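// --- Editor's note (not part of the diff): the old stop() wrapped
// close(stopSignalCh) in a select so a second call would not panic on an
// already-closed channel; a context.CancelFunc is safe to invoke repeatedly,
// which is why the replacement is a bare h.cancelCtx(). Sketch:
//
//	ctx, cancel := context.WithCancel(context.Background())
//	cancel()
//	cancel()     // no-op, unlike closing an already-closed channel
//	<-ctx.Done() // returns immediately after the first cancel
// --- end note ---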

// pushHealResultItem - pushes a heal result item for consumption in
@@ -445,29 +562,27 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {

var itemsLen int
for {
h.currentStatus.updateLock.Lock()
h.mutex.Lock()
itemsLen = len(h.currentStatus.Items)
if itemsLen == maxUnconsumedHealResultItems {
// unlock and wait to check again if we can push
h.currentStatus.updateLock.Unlock()

// wait for a second, or quit if an external
// stop signal is received or the
// unconsumedTimer fires.
select {
// Check after a second
case <-time.After(time.Second):
h.mutex.Unlock()
continue

case <-h.stopSignalCh:
case <-h.ctx.Done():
h.mutex.Unlock()
// discard result and return.
return errHealPushStopNDiscard
return errHealStopSignalled

// Timeout if no results consumed for too
// long.
// Timeout if no results consumed for too long.
case <-unconsumedTimer.C:
h.mutex.Unlock()
return errHealIdleTimeout

}
}
break
@@ -484,13 +599,7 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
h.currentStatus.Items = append(h.currentStatus.Items, r)

// release lock
h.currentStatus.updateLock.Unlock()

// This is a "safe" point for the heal sequence to quit if
// signaled externally.
if h.isQuitting() {
return errHealStopSignalled
}
h.mutex.Unlock()

return nil
}
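// --- Editor's sketch (not part of the diff): the loop above is a bounded
// result buffer: when maxUnconsumedHealResultItems items are pending it
// releases the lock, then retries after a second unless the sequence is
// cancelled or the idle timer fires. The same control flow in isolation
// (all names below are illustrative):
//
//	for {
//		mu.Lock()
//		if len(items) < maxItems {
//			items = append(items, next)
//			mu.Unlock()
//			return nil
//		}
//		mu.Unlock()
//		select {
//		case <-time.After(time.Second): // re-check capacity
//		case <-ctx.Done():
//			return ctx.Err()
//		case <-idleTimer.C:
//			return errIdleTimeout
//		}
//	}
// --- end sketch ---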
@@ -502,40 +611,41 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
// routine for completion, and (2) listens for external stop
// signals. When either event happens, it sets the finish status for
// the heal-sequence.
func (h *healSequence) healSequenceStart() {
func (h *healSequence) healSequenceStart(objAPI ObjectLayer) {
// Set status as running
h.currentStatus.updateLock.Lock()
h.mutex.Lock()
h.currentStatus.Summary = healRunningStatus
h.currentStatus.StartTime = UTCNow()
h.currentStatus.updateLock.Unlock()
h.mutex.Unlock()

if h.sourceCh == nil {
go h.traverseAndHeal()
go h.traverseAndHeal(objAPI)
} else {
go h.healFromSourceCh()
}

select {
case err, ok := <-h.traverseAndHealDoneCh:
if !ok {
return
}
h.mutex.Lock()
h.endTime = UTCNow()
h.currentStatus.updateLock.Lock()
defer h.currentStatus.updateLock.Unlock()
// Heal traversal is complete.
if ok {
if err == nil {
// heal traversal succeeded.
h.currentStatus.Summary = healFinishedStatus
} else {
// heal traversal had an error.
h.currentStatus.Summary = healStoppedStatus
h.currentStatus.FailureDetail = err.Error()
} else {
// heal traversal succeeded.
h.currentStatus.Summary = healFinishedStatus
}

case <-h.stopSignalCh:
h.mutex.Unlock()
case <-h.ctx.Done():
h.mutex.Lock()
h.endTime = UTCNow()
h.currentStatus.updateLock.Lock()
h.currentStatus.Summary = healStoppedStatus
h.currentStatus.FailureDetail = errHealStopSignalled.Error()
h.currentStatus.updateLock.Unlock()
h.currentStatus.Summary = healFinishedStatus
h.mutex.Unlock()

// drain traverse channel so the traversal
// go-routine does not leak.
@@ -548,64 +658,118 @@ func (h *healSequence) healSequenceStart() {
}
}

func (h *healSequence) queueHealTask(path string, healType madmin.HealItemType) error {
var respCh = make(chan healResult)
defer close(respCh)
func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
globalHealConfigMu.Lock()
opts := globalHealConfig
globalHealConfigMu.Unlock()

// Send heal request
globalBackgroundHealRoutine.queueHealTask(healTask{path: path, responseCh: respCh, opts: h.settings})
// Wait for answer and push result to the client
res := <-respCh
if !h.reportProgress {
return nil
task := healTask{
bucket: source.bucket,
object: source.object,
versionID: source.versionID,
opts: h.settings,
responseCh: h.respCh,
}
res.result.Type = healType
if res.err != nil {
// Object might have been deleted, by the time heal
// was attempted, we should ignore this object and return success.
if isErrObjectNotFound(res.err) {
return nil
if source.opts != nil {
task.opts = *source.opts
} else {
if opts.Bitrot {
task.opts.ScanMode = madmin.HealDeepScan
}
// Only report object error
if healType != madmin.HealItemObject {
}

// Wait and proceed if there are active requests
waitForLowHTTPReq(opts.IOCount, opts.Sleep)

h.mutex.Lock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
h.mutex.Unlock()

globalBackgroundHealRoutine.queueHealTask(task)

select {
case res := <-h.respCh:
if !h.reportProgress {
// Object might have been deleted, by the time heal
// was attempted, we should ignore this object and
// return the error and not calculate this object
// as part of the metrics.
if isErrObjectNotFound(res.err) || isErrVersionNotFound(res.err) {
// Return the error so that caller can handle it.
return res.err
}

h.mutex.Lock()
defer h.mutex.Unlock()

// Progress is not reported in case of background heal processing.
// Instead we increment relevant counter based on the heal result
// for prometheus reporting.
if res.err != nil {
for _, d := range res.result.After.Drives {
// For failed items we report the endpoint and drive state
// This will help users take corrective actions for drives
h.healFailedItemsMap[d.Endpoint+","+d.State]++
}
} else {
// Only object type reported for successful healing
h.healedItemsMap[res.result.Type]++
}

// Report caller of any failure
return res.err
}
res.result.Detail = res.err.Error()
res.result.Type = healType
if res.err != nil {
// Object might have been deleted, by the time heal
// was attempted, we should ignore this object and return success.
if isErrObjectNotFound(res.err) || isErrVersionNotFound(res.err) {
return nil
}
// Only report object error
if healType != madmin.HealItemObject {
return res.err
}
res.result.Detail = res.err.Error()
}
return h.pushHealResultItem(res.result)
case <-h.ctx.Done():
return nil
}
return h.pushHealResultItem(res.result)
}

func (h *healSequence) healItemsFromSourceCh() error {
h.lastHealActivity = UTCNow()

bucketsOnly := true // heal buckets only, not objects.
if err := h.healItems(bucketsOnly); err != nil {
logger.LogIf(h.ctx, err)
}

for {
select {
case path := <-h.sourceCh:
case source, ok := <-h.sourceCh:
if !ok {
return nil
}
var itemType madmin.HealItemType
switch {
case path == nopHeal:
case source.bucket == nopHeal:
continue
case path == SlashSeparator:
case source.bucket == SlashSeparator:
itemType = madmin.HealItemMetadata
case !strings.Contains(path, SlashSeparator):
case source.bucket != "" && source.object == "":
itemType = madmin.HealItemBucket
default:
itemType = madmin.HealItemObject
}

if err := h.queueHealTask(path, itemType); err != nil {
logger.LogIf(h.ctx, err)
if err := h.queueHealTask(source, itemType); err != nil {
switch err.(type) {
case ObjectExistsAsDirectory:
case ObjectNotFound:
case VersionNotFound:
default:
logger.LogIf(h.ctx, fmt.Errorf("Heal attempt failed for %s: %w",
pathJoin(source.bucket, source.object), err))
}
}

h.scannedItemsCount++
h.lastHealActivity = UTCNow()
case <-h.traverseAndHealDoneCh:
return nil
case <-GlobalServiceDoneCh:
case <-h.ctx.Done():
return nil
}
}
@@ -613,32 +777,30 @@ func (h *healSequence) healItemsFromSourceCh() error {

func (h *healSequence) healFromSourceCh() {
h.healItemsFromSourceCh()
close(h.traverseAndHealDoneCh)
}

func (h *healSequence) healItems(bucketsOnly bool) error {
// Start with format healing
if err := h.healDiskFormat(); err != nil {
return err
func (h *healSequence) healDiskMeta(objAPI ObjectLayer) error {
// Try to pro-actively heal backend-encrypted file.
if err := h.queueHealTask(healSource{
bucket: minioMetaBucket,
object: backendEncryptedFile,
}, madmin.HealItemBucketMetadata); err != nil {
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return err
}
}

// Start healing the config prefix.
if err := h.healMinioSysMeta(minioConfigPrefix)(); err != nil {
return err
}
return h.healMinioSysMeta(objAPI, minioConfigPrefix)()
}

// Start healing the bucket config prefix.
if err := h.healMinioSysMeta(bucketConfigPrefix)(); err != nil {
func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error {
if err := h.healDiskMeta(objAPI); err != nil {
return err
}

// Start healing the background ops prefix.
if err := h.healMinioSysMeta(backgroundOpsMetaPrefix)(); err != nil {
logger.LogIf(h.ctx, err)
}

// Heal buckets and objects
return h.healBuckets(bucketsOnly)
return h.healBuckets(objAPI, bucketsOnly)
}

// traverseAndHeal - traverses on-disk data and performs healing
@@ -648,43 +810,40 @@ func (h *healSequence) healItems(bucketsOnly bool) error {
// quit signal is received, this routine cannot quit immediately and
// has to wait until a safe point is reached, such as between scanning
// two objects.
func (h *healSequence) traverseAndHeal() {
func (h *healSequence) traverseAndHeal(objAPI ObjectLayer) {
bucketsOnly := false // Heals buckets and objects also.
if err := h.healItems(bucketsOnly); err != nil {
if h.isQuitting() {
err = errHealStopSignalled
}
h.traverseAndHealDoneCh <- err
}

h.traverseAndHealDoneCh <- h.healItems(objAPI, bucketsOnly)
close(h.traverseAndHealDoneCh)
}

// healMinioSysMeta - heals all files under a given meta prefix, returns a function
// which in-turn heals the respective meta directory path and any files in it.
func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) func() error {
return func() error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}

// NOTE: Healing on meta is run regardless
// of any bucket being selected, this is to ensure that
// meta are always up to date and correct.
return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, func(bucket string, object string) error {
return objAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string) error {
if h.isQuitting() {
return errHealStopSignalled
}

herr := h.queueHealTask(pathJoin(bucket, object), madmin.HealItemBucketMetadata)
// Object might have been deleted, by the time heal
// was attempted we ignore this object and move on.
if isErrObjectNotFound(herr) {
// Skip metacache entries healing
if strings.HasPrefix(object, "buckets/.minio.sys/.metacache/") {
return nil
}
return herr

err := h.queueHealTask(healSource{
bucket: bucket,
object: object,
versionID: versionID,
}, madmin.HealItemBucketMetadata)
// Object might have been deleted, by the time heal
// was attempted we ignore this object and move on.
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return nil
}
return err
})
}
}
@@ -696,39 +855,32 @@ func (h *healSequence) healDiskFormat() error {
return errHealStopSignalled
}

// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}

return h.queueHealTask(SlashSeparator, madmin.HealItemMetadata)
return h.queueHealTask(healSource{bucket: SlashSeparator}, madmin.HealItemMetadata)
}

// healBuckets - check for all buckets heal or just particular bucket.
func (h *healSequence) healBuckets(bucketsOnly bool) error {
func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
if h.isQuitting() {
return errHealStopSignalled
}

// 1. If a bucket was specified, heal only the bucket.
if h.bucket != "" {
return h.healBucket(h.bucket, bucketsOnly)
return h.healBucket(objAPI, h.bucket, bucketsOnly)
}

// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}

buckets, err := objectAPI.ListBucketsHeal(h.ctx)
buckets, err := objAPI.ListBuckets(h.ctx)
if err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}

// Heal latest buckets first.
sort.Slice(buckets, func(i, j int) bool {
return buckets[i].Created.After(buckets[j].Created)
})

for _, bucket := range buckets {
if err = h.healBucket(bucket.Name, bucketsOnly); err != nil {
if err = h.healBucket(objAPI, bucket.Name, bucketsOnly); err != nil {
return err
}
}
@@ -737,15 +889,11 @@ func (h *healSequence) healBuckets(bucketsOnly bool) error {
}

// healBucket - traverses and heals given bucket
func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}

if err := h.queueHealTask(bucket, madmin.HealItemBucket); err != nil {
return err
func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly bool) error {
if err := h.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket); err != nil {
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return err
}
}

if bucketsOnly {
@@ -753,12 +901,15 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
}

if !h.settings.Recursive {
if h.objPrefix != "" {
if h.object != "" {
// Check if an object named as the objPrefix exists,
// and if so heal it.
_, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix, ObjectOptions{})
oi, err := objAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
if err == nil {
if err = h.healObject(bucket, h.objPrefix); err != nil {
if err = h.healObject(bucket, h.object, oi.VersionID); err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return nil
}
return err
}
}
@@ -767,23 +918,26 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
return nil
}

if err := objectAPI.HealObjects(h.ctx, bucket, h.objPrefix, h.healObject); err != nil {
return errFnHealFromAPIErr(h.ctx, err)
if err := objAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
// Object might have been deleted, by the time heal
// was attempted we ignore this object and move on.
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return errFnHealFromAPIErr(h.ctx, err)
}
}
return nil
}

// healObject - heal the given object and record result
func (h *healSequence) healObject(bucket, object string) error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}

func (h *healSequence) healObject(bucket, object, versionID string) error {
if h.isQuitting() {
return errHealStopSignalled
}

return h.queueHealTask(pathJoin(bucket, object), madmin.HealItemObject)
err := h.queueHealTask(healSource{
bucket: bucket,
object: object,
versionID: versionID,
}, madmin.HealItemObject)
return err
}

@@ -20,13 +20,17 @@ import (
"net/http"

"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
)

const (
adminPathPrefix = minioReservedBucketPath + "/admin"
adminAPIVersion = madmin.AdminAPIVersion
adminAPIVersionPrefix = SlashSeparator + madmin.AdminAPIVersion
adminPathPrefix = minioReservedBucketPath + "/admin"
adminAPIVersionV2 = madmin.AdminAPIVersionV2
adminAPIVersion = madmin.AdminAPIVersion
adminAPIVersionPrefix = SlashSeparator + adminAPIVersion
adminAPIVersionV2Prefix = SlashSeparator + adminAPIVersionV2
)

// adminAPIHandlers provides HTTP handlers for MinIO admin API.
@@ -41,128 +45,184 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)

/// Service operations

// Restart and stop MinIO service.
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix+"/service").HandlerFunc(httpTraceAll(adminAPI.ServiceActionHandler)).Queries("action", "{action:.*}")
// Update MinIO servers.
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix+"/update").HandlerFunc(httpTraceAll(adminAPI.ServerUpdateHandler)).Queries("updateURL", "{updateURL:.*}")

// Info operations
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
// Hardware Info operations
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/hardware").HandlerFunc(httpTraceAll(adminAPI.ServerHardwareInfoHandler)).Queries("hwType", "{hwType:.*}")

// StorageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/storageinfo").HandlerFunc(httpTraceAll(adminAPI.StorageInfoHandler))
// DataUsageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/datausageinfo").HandlerFunc(httpTraceAll(adminAPI.DataUsageInfoHandler))

adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/accountingusageinfo").HandlerFunc(httpTraceAll(adminAPI.AccountingUsageInfoHandler))

if globalIsDistXL || globalIsXL {
/// Heal operations

// Heal processing endpoint.
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))

adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix + "/background-heal/status").HandlerFunc(httpTraceAll(adminAPI.BackgroundHealStatusHandler))

/// Health operations

}
// Performance command - return performance details based on input type
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/performance").HandlerFunc(httpTraceAll(adminAPI.PerfInfoHandler)).Queries("perfType", "{perfType:.*}")

// Profiling operations
adminRouter.Methods(http.MethodPost).Path(adminAPIVersionPrefix+"/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
Queries("profilerType", "{profilerType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))

// Config KV operations.
if enableConfigOps {
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/get-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKVHandler)).Queries("key", "{key:.*}")
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix + "/set-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKVHandler))
adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix + "/del-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.DelConfigKVHandler))
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/help-config-kv").HandlerFunc(httpTraceAll(adminAPI.HelpConfigKVHandler)).Queries("subSys", "{subSys:.*}", "key", "{key:.*}")
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/list-config-history-kv").HandlerFunc(httpTraceAll(adminAPI.ListConfigHistoryKVHandler)).Queries("count", "{count:[0-9]+}")
adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix+"/clear-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.ClearConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/restore-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
adminVersions := []string{
adminAPIVersionPrefix,
adminAPIVersionV2Prefix,
}

/// Config operations
if enableConfigOps {
// Get config
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
// Set config
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix + "/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
for _, adminVersion := range adminVersions {
// Restart and stop MinIO service.
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/service").HandlerFunc(httpTraceAll(adminAPI.ServiceHandler)).Queries("action", "{action:.*}")
// Update MinIO servers.
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update").HandlerFunc(httpTraceAll(adminAPI.ServerUpdateHandler)).Queries("updateURL", "{updateURL:.*}")

// Info operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))

// StorageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(httpTraceAll(adminAPI.StorageInfoHandler))
// DataUsageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(httpTraceAll(adminAPI.DataUsageInfoHandler))

if globalIsDistErasure || globalIsErasure {
/// Heal operations

// Heal processing endpoint.
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))

adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(httpTraceAll(adminAPI.BackgroundHealStatusHandler))

/// Health operations

}

// Profiling operations
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
Queries("profilerType", "{profilerType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))

// Config KV operations.
if enableConfigOps {
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKVHandler)).Queries("key", "{key:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/set-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKVHandler))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/del-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.DelConfigKVHandler))
}

// Enable config help in all modes.
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/help-config-kv").HandlerFunc(httpTraceAll(adminAPI.HelpConfigKVHandler)).Queries("subSys", "{subSys:.*}", "key", "{key:.*}")

// Config KV history operations.
if enableConfigOps {
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-config-history-kv").HandlerFunc(httpTraceAll(adminAPI.ListConfigHistoryKVHandler)).Queries("count", "{count:[0-9]+}")
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/clear-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.ClearConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/restore-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
}

/// Config import/export bulk operations
if enableConfigOps {
// Get config
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
// Set config
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
}

if enableIAMOps {
// -- IAM APIs --

// Add policy IAM
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")

// Add user IAM

adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountinfo").HandlerFunc(httpTraceAll(adminAPI.AccountInfoHandler))

adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")

adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")

// Service accounts ops
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/add-service-account").HandlerFunc(httpTraceHdrs(adminAPI.AddServiceAccount))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-service-accounts").HandlerFunc(httpTraceHdrs(adminAPI.ListServiceAccounts))
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/delete-service-account").HandlerFunc(httpTraceHdrs(adminAPI.DeleteServiceAccount)).Queries("accessKey", "{accessKey:.*}")

if adminVersion == adminAPIVersionV2Prefix {
// Info policy IAM v2
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicyV2)).Queries("name", "{name:.*}")

// List policies v2
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPoliciesV2))
} else {
// Info policy IAM latest
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")

// List policies latest
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
|
||||
}
|
||||
|
||||
// Remove policy IAM
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
|
||||
|
||||
// Set user or group policy
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-or-group-policy").
|
||||
HandlerFunc(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup)).
|
||||
Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")
|
||||
|
||||
// Remove user IAM
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
// List users
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))
|
||||
|
||||
// User info
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/user-info").HandlerFunc(httpTraceHdrs(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
// Add/Remove members from group
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers))
|
||||
|
||||
// Get Group
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}")
|
||||
|
||||
// List Groups
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups))
|
||||
|
||||
// Set Group Status
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
|
||||
}
|
||||
|
||||
if globalIsDistErasure || globalIsErasure {
|
||||
// Quota operations
|
||||
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
|
||||
// GetBucketQuotaConfig
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
|
||||
// PutBucketQuotaConfig
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
|
||||
|
||||
// Bucket replication operations
|
||||
// GetBucketTargetHandler
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
|
||||
// SetRemoteTargetHandler
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
|
||||
// RemoveRemoteTargetHandler
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
|
||||
}
|
||||
}
|
||||
// -- Top APIs --
|
||||
// Top locks
|
||||
if globalIsDistErasure {
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
|
||||
}
|
||||
|
||||
// HTTP Trace
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/trace").HandlerFunc(adminAPI.TraceHandler)
|
||||
|
||||
// Console Logs
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/log").HandlerFunc(httpTraceAll(adminAPI.ConsoleLogHandler))
|
||||
|
||||
// -- KMS APIs --
|
||||
//
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/kms/key/create").HandlerFunc(httpTraceAll(adminAPI.KMSCreateKeyHandler)).Queries("key-id", "{key-id:.*}")
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))
|
||||
|
||||
if !globalIsGateway {
|
||||
// Keep obdinfo for backward compatibility with mc
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/obdinfo").
|
||||
HandlerFunc(httpTraceHdrs(adminAPI.HealthInfoHandler))
|
||||
// -- Health API --
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
|
||||
HandlerFunc(httpTraceHdrs(adminAPI.HealthInfoHandler))
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/bandwidth").
|
||||
HandlerFunc(httpTraceHdrs(adminAPI.BandwidthMonitorHandler))
|
||||
}
|
||||
}
|
||||
|
||||
if enableIAMOps {
|
||||
// -- IAM APIs --
|
||||
|
||||
// Add policy IAM
|
||||
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name",
|
||||
"{name:.*}")
|
||||
|
||||
// Add user IAM
|
||||
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
|
||||
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).
|
||||
Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
|
||||
|
||||
// Info policy IAM
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
|
||||
|
||||
// Remove policy IAM
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix+"/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
|
||||
|
||||
// Set user or group policy
|
||||
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/set-user-or-group-policy").
|
||||
HandlerFunc(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup)).
|
||||
Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")
|
||||
|
||||
// Remove user IAM
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminAPIVersionPrefix+"/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
// List users
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))
|
||||
|
||||
// User info
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/user-info").HandlerFunc(httpTraceHdrs(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")
|
||||
|
||||
// Add/Remove members from group
|
||||
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix + "/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers))
|
||||
|
||||
// Get Group
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix+"/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}")
|
||||
|
||||
// List Groups
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups))
|
||||
|
||||
// Set Group Status
|
||||
adminRouter.Methods(http.MethodPut).Path(adminAPIVersionPrefix+"/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
|
||||
|
||||
// List policies
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
|
||||
}
|
||||
|
||||
// -- Top APIs --
|
||||
// Top locks
|
||||
if globalIsDistXL {
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
|
||||
}
|
||||
|
||||
// HTTP Trace
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/trace").HandlerFunc(adminAPI.TraceHandler)
|
||||
|
||||
// Console Logs
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/log").HandlerFunc(httpTraceAll(adminAPI.ConsoleLogHandler))
|
||||
|
||||
// -- KMS APIs --
|
||||
//
|
||||
adminRouter.Methods(http.MethodGet).Path(adminAPIVersionPrefix + "/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))
|
||||
|
||||
// If none of the routes match add default error handler routes
|
||||
adminRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
|
||||
adminRouter.MethodNotAllowedHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
|
||||
adminRouter.NotFoundHandler = httpTraceAll(errorResponseHandler)
|
||||
adminRouter.MethodNotAllowedHandler = httpTraceAll(methodNotAllowedHandler("Admin"))
|
||||
}
|
||||
|
||||
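
The registrations above rely on gorilla/mux query matching: a route only matches when the named query parameters are present and satisfy the inline regex. A minimal, self-contained sketch of the same pattern follows; the /demo path and handler are invented for illustration and are not part of this changeset.

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// Matches GET /demo/get-config-kv?key=... only; without the "key"
	// query parameter the route does not match and falls through.
	r.Methods(http.MethodGet).Path("/demo/get-config-kv").
		HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			fmt.Fprintf(w, "key=%s\n", req.URL.Query().Get("key"))
		}).Queries("key", "{key:.*}")

	// Unmatched requests land in a default handler, mirroring the
	// NotFoundHandler assignment above.
	r.NotFoundHandler = http.HandlerFunc(http.NotFound)

	fmt.Println(http.ListenAndServe(":8080", r))
}
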
@@ -17,185 +17,20 @@
package cmd

import (
	"net"
	"net/http"
	"os"

	"github.com/minio/minio-go/v6/pkg/set"
	"github.com/minio/minio/pkg/cpu"
	"github.com/minio/minio/pkg/disk"
	"github.com/minio/minio/pkg/madmin"
	"github.com/minio/minio/pkg/mem"

	cpuhw "github.com/shirou/gopsutil/cpu"
)

// getLocalMemUsage - returns ServerMemUsageInfo for all zones, endpoints.
func getLocalMemUsage(endpointZones EndpointZones, r *http.Request) ServerMemUsageInfo {
	var memUsages []mem.Usage
	var historicUsages []mem.Usage
	seenHosts := set.NewStringSet()
	for _, ep := range endpointZones {
		for _, endpoint := range ep.Endpoints {
			if seenHosts.Contains(endpoint.Host) {
				continue
			}
			seenHosts.Add(endpoint.Host)

			// Only proceed for local endpoints
			if endpoint.IsLocal {
				memUsages = append(memUsages, mem.GetUsage())
				historicUsages = append(historicUsages, mem.GetHistoricUsage())
			}
		}
	}
	addr := r.Host
	if globalIsDistXL {
		addr = GetLocalPeer(endpointZones)
	}
	return ServerMemUsageInfo{
		Addr:          addr,
		Usage:         memUsages,
		HistoricUsage: historicUsages,
	}
}

// getLocalCPULoad - returns ServerCPULoadInfo for all zones, endpoints.
func getLocalCPULoad(endpointZones EndpointZones, r *http.Request) ServerCPULoadInfo {
	var cpuLoads []cpu.Load
	var historicLoads []cpu.Load
	seenHosts := set.NewStringSet()
	for _, ep := range endpointZones {
		for _, endpoint := range ep.Endpoints {
			if seenHosts.Contains(endpoint.Host) {
				continue
			}
			seenHosts.Add(endpoint.Host)

			// Only proceed for local endpoints
			if endpoint.IsLocal {
				cpuLoads = append(cpuLoads, cpu.GetLoad())
				historicLoads = append(historicLoads, cpu.GetHistoricLoad())
			}
		}
	}
	addr := r.Host
	if globalIsDistXL {
		addr = GetLocalPeer(endpointZones)
	}
	return ServerCPULoadInfo{
		Addr:         addr,
		Load:         cpuLoads,
		HistoricLoad: historicLoads,
	}
}

// getLocalDrivesPerf - returns ServerDrivesPerfInfo for all zones, endpoints.
func getLocalDrivesPerf(endpointZones EndpointZones, size int64, r *http.Request) madmin.ServerDrivesPerfInfo {
	var dps []disk.Performance
	for _, ep := range endpointZones {
		for _, endpoint := range ep.Endpoints {
			// Only proceed for local endpoints
			if endpoint.IsLocal {
				if _, err := os.Stat(endpoint.Path); err != nil {
					// Since this drive is not available, add relevant details and proceed
					dps = append(dps, disk.Performance{Path: endpoint.Path, Error: err.Error()})
					continue
				}
				dp := disk.GetPerformance(pathJoin(endpoint.Path, minioMetaTmpBucket, mustGetUUID()), size)
				dp.Path = endpoint.Path
				dps = append(dps, dp)
			}
		}
	}
	addr := r.Host
	if globalIsDistXL {
		addr = GetLocalPeer(endpointZones)
	}
	return madmin.ServerDrivesPerfInfo{
		Addr: addr,
		Perf: dps,
	}
}

// getLocalCPUInfo - returns ServerCPUHardwareInfo for all zones, endpoints.
func getLocalCPUInfo(endpointZones EndpointZones, r *http.Request) madmin.ServerCPUHardwareInfo {
	var cpuHardwares []cpuhw.InfoStat
	seenHosts := set.NewStringSet()
	for _, ep := range endpointZones {
		for _, endpoint := range ep.Endpoints {
			if seenHosts.Contains(endpoint.Host) {
				continue
			}
			// Add to the list of visited hosts
			seenHosts.Add(endpoint.Host)
			// Only proceed for local endpoints
			if endpoint.IsLocal {
				cpuHardware, err := cpuhw.Info()
				if err != nil {
					return madmin.ServerCPUHardwareInfo{
						Error: err.Error(),
					}
				}
				cpuHardwares = append(cpuHardwares, cpuHardware...)
			}
		}
	}
	addr := r.Host
	if globalIsDistXL {
		addr = GetLocalPeer(endpointZones)
	}

	return madmin.ServerCPUHardwareInfo{
		Addr:    addr,
		CPUInfo: cpuHardwares,
	}
}

// getLocalNetworkInfo - returns ServerNetworkHardwareInfo for all zones, endpoints.
func getLocalNetworkInfo(endpointZones EndpointZones, r *http.Request) madmin.ServerNetworkHardwareInfo {
	var networkHardwares []net.Interface
	seenHosts := set.NewStringSet()
	for _, ep := range endpointZones {
		for _, endpoint := range ep.Endpoints {
			if seenHosts.Contains(endpoint.Host) {
				continue
			}
			// Add to the list of visited hosts
			seenHosts.Add(endpoint.Host)
			// Only proceed for local endpoints
			if endpoint.IsLocal {
				networkHardware, err := net.Interfaces()
				if err != nil {
					return madmin.ServerNetworkHardwareInfo{
						Error: err.Error(),
					}
				}
				networkHardwares = append(networkHardwares, networkHardware...)
			}
		}
	}
	addr := r.Host
	if globalIsDistXL {
		addr = GetLocalPeer(endpointZones)
	}

	return madmin.ServerNetworkHardwareInfo{
		Addr:        addr,
		NetworkInfo: networkHardwares,
	}
}

// getLocalServerProperty - returns ServerDrivesPerfInfo for only the
// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints from given list of endpoints
func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin.ServerProperties {
	var disks []madmin.Disk
func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Request) madmin.ServerProperties {
	addr := r.Host
	if globalIsDistXL {
		addr = GetLocalPeer(endpointZones)
	if globalIsDistErasure {
		addr = GetLocalPeer(endpointServerPools)
	}
	network := make(map[string]string)
	for _, ep := range endpointZones {
	for _, ep := range endpointServerPools {
		for _, endpoint := range ep.Endpoints {
			nodeName := endpoint.Host
			if nodeName == "" {
@@ -204,33 +39,14 @@ func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin
			if endpoint.IsLocal {
				// Only proceed for local endpoints
				network[nodeName] = "online"
				var di = madmin.Disk{
					DrivePath: endpoint.Path,
				}
				diInfo, err := disk.GetInfo(endpoint.Path)
				if err != nil {
					if os.IsNotExist(err) || isSysErrPathNotFound(err) {
						di.State = madmin.DriveStateMissing
					} else {
						di.State = madmin.DriveStateCorrupt
					}
					continue
				}
				_, present := network[nodeName]
				if !present {
					if err := IsServerResolvable(endpoint); err == nil {
						network[nodeName] = "online"
					} else {
					di.State = madmin.DriveStateOk
					di.DrivePath = endpoint.Path
					di.TotalSpace = diInfo.Total
					di.UsedSpace = diInfo.Total - diInfo.Free
					di.Utilization = float64((diInfo.Total - diInfo.Free) / diInfo.Total * 100)
				}
				disks = append(disks, di)
			} else {
				_, present := network[nodeName]
				if !present {
					err := IsServerResolvable(endpoint)
					if err == nil {
						network[nodeName] = "online"
					} else {
						network[nodeName] = "offline"
					}
					network[nodeName] = "offline"
				}
			}
		}
@@ -243,6 +59,5 @@ func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin
		Version:  Version,
		CommitID: CommitID,
		Network:  network,
		Disks:    disks,
	}
}

@@ -18,11 +18,53 @@ package cmd

import (
	"encoding/xml"
	"time"
)

// ObjectIdentifier carries key name for the object to delete.
type ObjectIdentifier struct {
// DeletedObject objects deleted
type DeletedObject struct {
	DeleteMarker          bool   `xml:"DeleteMarker,omitempty"`
	DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
	ObjectName            string `xml:"Key,omitempty"`
	VersionID             string `xml:"VersionId,omitempty"`

	// MinIO extensions to support delete marker replication
	// Replication status of DeleteMarker
	DeleteMarkerReplicationStatus string `xml:"DeleteMarkerReplicationStatus,omitempty"`
	// MTime of DeleteMarker on source that needs to be propagated to replica
	DeleteMarkerMTime DeleteMarkerMTime `xml:"DeleteMarkerMTime,omitempty"`
	// Status of versioned delete (of object or DeleteMarker)
	VersionPurgeStatus VersionPurgeStatusType `xml:"VersionPurgeStatus,omitempty"`
	// PurgeTransitioned is nonempty if object is in transition tier
	PurgeTransitioned string `xml:"PurgeTransitioned,omitempty"`
}

// DeleteMarkerMTime is an embedded type containing time.Time for XML marshal
type DeleteMarkerMTime struct {
	time.Time
}

// MarshalXML encodes the delete marker modification time if it is non-zero
// and omits the element otherwise
func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
	if t.Time.IsZero() {
		return nil
	}
	return e.EncodeElement(t.Time.Format(time.RFC3339), startElement)
}
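
The DeleteMarkerMTime type above uses a common encoding/xml pattern: embed time.Time and give it a custom MarshalXML so a zero time omits the element entirely. A minimal, self-contained sketch follows; the demo* names are invented for illustration and are not part of this changeset.

package main

import (
	"encoding/xml"
	"fmt"
	"time"
)

// demoMTime mirrors the DeleteMarkerMTime pattern above.
type demoMTime struct {
	time.Time
}

// MarshalXML writes nothing for a zero time, so the element is omitted;
// otherwise it encodes the time as an RFC3339 string.
func (t demoMTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if t.Time.IsZero() {
		return nil
	}
	return e.EncodeElement(t.Time.Format(time.RFC3339), start)
}

type demoDeleted struct {
	Key   string    `xml:"Key"`
	MTime demoMTime `xml:"DeleteMarkerMTime,omitempty"`
}

func main() {
	withTime := demoDeleted{Key: "obj", MTime: demoMTime{time.Date(2021, 1, 2, 3, 4, 5, 0, time.UTC)}}
	out, _ := xml.Marshal(withTime)
	fmt.Println(string(out)) // <demoDeleted><Key>obj</Key><DeleteMarkerMTime>2021-01-02T03:04:05Z</DeleteMarkerMTime></demoDeleted>

	out, _ = xml.Marshal(demoDeleted{Key: "obj"})
	fmt.Println(string(out)) // <demoDeleted><Key>obj</Key></demoDeleted> (zero time drops the element)
}
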

// ObjectToDelete carries key name for the object to delete.
type ObjectToDelete struct {
	ObjectName string `xml:"Key"`
	VersionID  string `xml:"VersionId"`
	// Replication status of DeleteMarker
	DeleteMarkerReplicationStatus string `xml:"DeleteMarkerReplicationStatus"`
	// Status of versioned delete (of object or DeleteMarker)
	VersionPurgeStatus VersionPurgeStatusType `xml:"VersionPurgeStatus"`
	// Version ID of delete marker
	DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId"`
	// PurgeTransitioned is nonempty if object is in transition tier
	PurgeTransitioned string `xml:"PurgeTransitioned"`
}

// createBucketConfiguration container for bucket configuration request from client.
@@ -37,5 +79,5 @@ type DeleteObjectsRequest struct {
	// Element to enable quiet mode for the request
	Quiet bool
	// List of objects to be deleted
	Objects []ObjectIdentifier `xml:"Object"`
	Objects []ObjectToDelete `xml:"Object"`
}

@@ -25,18 +25,20 @@ import (
	"strings"

	"github.com/Azure/azure-storage-blob-go/azblob"
	"github.com/aliyun/aliyun-oss-go-sdk/oss"
	"google.golang.org/api/googleapi"

	minio "github.com/minio/minio-go/v6"
	"github.com/minio/minio/cmd/config/etcd/dns"
	minio "github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/tags"
	"github.com/minio/minio/cmd/config/dns"
	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/bucket/lifecycle"
	"github.com/minio/minio/pkg/bucket/replication"

	objectlock "github.com/minio/minio/pkg/bucket/object/lock"
	"github.com/minio/minio/pkg/bucket/object/tagging"
	"github.com/minio/minio/pkg/bucket/policy"
	"github.com/minio/minio/pkg/bucket/versioning"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/hash"
)
@@ -101,8 +103,28 @@ const (
	ErrNoSuchBucketLifecycle
	ErrNoSuchLifecycleConfiguration
	ErrNoSuchBucketSSEConfig
	ErrNoSuchCORSConfiguration
	ErrNoSuchWebsiteConfiguration
	ErrReplicationConfigurationNotFoundError
	ErrRemoteDestinationNotFoundError
	ErrReplicationDestinationMissingLock
	ErrRemoteTargetNotFoundError
	ErrReplicationRemoteConnectionError
	ErrBucketRemoteIdenticalToSource
	ErrBucketRemoteAlreadyExists
	ErrBucketRemoteLabelInUse
	ErrBucketRemoteArnTypeInvalid
	ErrBucketRemoteArnInvalid
	ErrBucketRemoteRemoveDisallowed
	ErrRemoteTargetNotVersionedError
	ErrReplicationSourceNotVersionedError
	ErrReplicationNeedsVersioningError
	ErrReplicationBucketNeedsVersioningError
	ErrBucketReplicationDisabledError
	ErrObjectRestoreAlreadyInProgress
	ErrNoSuchKey
	ErrNoSuchUpload
	ErrInvalidVersionID
	ErrNoSuchVersion
	ErrNotImplemented
	ErrPreconditionFailed
@@ -122,7 +144,8 @@ const (
	ErrMissingCredTag
	ErrCredMalformed
	ErrInvalidRegion
	ErrInvalidService
	ErrInvalidServiceS3
	ErrInvalidServiceSTS
	ErrInvalidRequestVersion
	ErrMissingSignTag
	ErrMissingSignHeadersTag
@@ -150,12 +173,14 @@ const (
	ErrBadRequest
	ErrKeyTooLongError
	ErrInvalidBucketObjectLockConfiguration
	ErrObjectLockConfigurationNotFound
	ErrObjectLockConfigurationNotAllowed
	ErrNoSuchObjectLockConfiguration
	ErrObjectLocked
	ErrInvalidRetentionDate
	ErrPastObjectLockRetainDate
	ErrUnknownWORMModeDirective
	ErrBucketTaggingNotFound
	ErrObjectLockInvalidHeaders
	ErrInvalidTagDirective
	// Add new error codes here.
@@ -210,6 +235,8 @@ const (
	ErrInvalidResourceName
	ErrServerNotInitialized
	ErrOperationTimedOut
	ErrClientDisconnected
	ErrOperationMaxedOut
	ErrInvalidRequest
	// MinIO storage class error codes
	ErrInvalidStorageClass
@@ -233,6 +260,10 @@ const (
	ErrAdminCredentialsMismatch
	ErrInsecureClientRequest
	ErrObjectTampered
	// Bucket Quota error codes
	ErrAdminBucketQuotaExceeded
	ErrAdminNoSuchQuotaConfiguration
	ErrAdminBucketQuotaDisabled

	ErrHealNotImplemented
	ErrHealNoSuchProcess
@@ -332,19 +363,29 @@ const (
	ErrAdminProfilerNotEnabled
	ErrInvalidDecompressedSize
	ErrAddUserInvalidArgument
	ErrAdminAccountNotEligible
	ErrAccountNotEligible
	ErrServiceAccountNotFound
	ErrPostPolicyConditionInvalidFormat
)

type errorCodeMap map[APIErrorCode]APIError

func (e errorCodeMap) ToAPIErr(errCode APIErrorCode) APIError {
func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError {
	apiErr, ok := e[errCode]
	if !ok {
		return e[ErrInternalError]
		apiErr = e[ErrInternalError]
	}
	if err != nil {
		apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
	}
	return apiErr
}

func (e errorCodeMap) ToAPIErr(errCode APIErrorCode) APIError {
	return e.ToAPIErrWithErr(errCode, nil)
}
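
The two functions above split error lookup from annotation: ToAPIErrWithErr appends the underlying cause to the canned description, while ToAPIErr keeps the old behavior by passing nil. A simplified, stand-alone sketch of the same lookup-then-annotate pattern follows; the types and codes here are invented stand-ins, not the real APIError machinery.

package main

import (
	"fmt"
	"net/http"
)

type apiErr struct {
	Code           string
	Description    string
	HTTPStatusCode int
}

type codeMap map[int]apiErr

const (
	errInternal = iota
	errInvalidBucket
)

func (m codeMap) toAPIErrWithErr(code int, err error) apiErr {
	e, ok := m[code]
	if !ok {
		e = m[errInternal] // unknown codes degrade to the internal error
	}
	if err != nil {
		// Append the underlying cause to the canned description.
		e.Description = fmt.Sprintf("%s (%s)", e.Description, err)
	}
	return e
}

func main() {
	m := codeMap{
		errInternal:      {"InternalError", "We encountered an internal error.", http.StatusInternalServerError},
		errInvalidBucket: {"InvalidBucketName", "The specified bucket is not valid.", http.StatusBadRequest},
	}
	fmt.Println(m.toAPIErrWithErr(errInvalidBucket, fmt.Errorf("bucket name contains '..'")))
}
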

// error code to APIError structure, these fields carry respective
// descriptions for all the error responses.
var errorCodes = errorCodeMap{
@@ -440,7 +481,7 @@ var errorCodes = errorCodeMap{
	},
	ErrInvalidAccessKeyID: {
		Code:           "InvalidAccessKeyId",
		Description:    "The access key ID you provided does not exist in our records.",
		Description:    "The Access Key Id you provided does not exist in our records.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrInvalidBucketName: {
@@ -518,9 +559,14 @@ var errorCodes = errorCodeMap{
		Description:    "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrInvalidVersionID: {
		Code:           "InvalidArgument",
		Description:    "Invalid version id specified",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrNoSuchVersion: {
		Code:           "NoSuchVersion",
		Description:    "Indicates that the version ID specified in the request does not match an existing version.",
		Description:    "The specified version does not exist.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrNotImplemented: {
@@ -628,20 +674,9 @@ var errorCodes = errorCodeMap{
		Description:    "X-Amz-Date must be in the ISO8601 Long Format \"yyyyMMdd'T'HHmmss'Z'\"",
		HTTPStatusCode: http.StatusBadRequest,
	},
	// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
	// right Description: "Error parsing the X-Amz-Credential parameter; incorrect date format \"%s\". This date in the credential must be in the format \"yyyyMMdd\".",
	// Need changes to make sure variable messages can be constructed.
	ErrMalformedCredentialDate: {
		Code:           "AuthorizationQueryParametersError",
		Description:    "Error parsing the X-Amz-Credential parameter; incorrect date format \"%s\". This date in the credential must be in the format \"yyyyMMdd\".",
		HTTPStatusCode: http.StatusBadRequest,
	},
	// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
	// right Description: "Error parsing the X-Amz-Credential parameter; the region 'us-east-' is wrong; expecting 'us-east-1'".
	// Need changes to make sure variable messages can be constructed.
	ErrMalformedCredentialRegion: {
		Code:           "AuthorizationQueryParametersError",
		Description:    "Error parsing the X-Amz-Credential parameter; the region is wrong;",
		Description:    "Error parsing the X-Amz-Credential parameter; incorrect date format. This date in the credential must be in the format \"yyyyMMdd\".",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidRegion: {
@@ -649,17 +684,16 @@ var errorCodes = errorCodeMap{
		Description:    "Region does not match.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
	// right Description: "Error parsing the X-Amz-Credential parameter; incorrect service \"s4\". This endpoint belongs to \"s3\".".
	// Need changes to make sure variable messages can be constructed.
	ErrInvalidService: {
		Code:           "AuthorizationQueryParametersError",
		Description:    "Error parsing the X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",
	ErrInvalidServiceS3: {
		Code:           "AuthorizationParametersError",
		Description:    "Error parsing the Credential/X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidServiceSTS: {
		Code:           "AuthorizationParametersError",
		Description:    "Error parsing the Credential parameter; incorrect service. This endpoint belongs to \"sts\".",
		HTTPStatusCode: http.StatusBadRequest,
	},
	// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
	// Description: "Error parsing the X-Amz-Credential parameter; incorrect terminal "aws4_reque". This endpoint uses "aws4_request".
	// Need changes to make sure variable messages can be constructed.
	ErrInvalidRequestVersion: {
		Code:           "AuthorizationQueryParametersError",
		Description:    "Error parsing the X-Amz-Credential parameter; incorrect terminal. This endpoint uses \"aws4_request\".",
@@ -730,8 +764,6 @@ var errorCodes = errorCodeMap{
		Description:    "Your key is too long",
		HTTPStatusCode: http.StatusBadRequest,
	},

	// FIXME: Actual XML error response also contains the header which missed in list of signed header parameters.
	ErrUnsignedHeaders: {
		Code:           "AccessDenied",
		Description:    "There were headers present in the request which were not signed",
@@ -757,11 +789,111 @@ var errorCodes = errorCodeMap{
		Description:    "Bucket is missing ObjectLockConfiguration",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBucketTaggingNotFound: {
		Code:           "NoSuchTagSet",
		Description:    "The TagSet does not exist",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrObjectLockConfigurationNotFound: {
		Code:           "ObjectLockConfigurationNotFoundError",
		Description:    "Object Lock configuration does not exist for this bucket",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrObjectLockConfigurationNotAllowed: {
		Code:           "InvalidBucketState",
		Description:    "Object Lock configuration cannot be enabled on existing buckets.",
		Description:    "Object Lock configuration cannot be enabled on existing buckets",
		HTTPStatusCode: http.StatusConflict,
	},
	ErrNoSuchCORSConfiguration: {
		Code:           "NoSuchCORSConfiguration",
		Description:    "The CORS configuration does not exist",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrNoSuchWebsiteConfiguration: {
		Code:           "NoSuchWebsiteConfiguration",
		Description:    "The specified bucket does not have a website configuration",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrReplicationConfigurationNotFoundError: {
		Code:           "ReplicationConfigurationNotFoundError",
		Description:    "The replication configuration was not found",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrRemoteDestinationNotFoundError: {
		Code:           "RemoteDestinationNotFoundError",
		Description:    "The remote destination bucket does not exist",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrReplicationDestinationMissingLock: {
		Code:           "ReplicationDestinationMissingLockError",
		Description:    "The replication destination bucket does not have object locking enabled",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrRemoteTargetNotFoundError: {
		Code:           "XMinioAdminRemoteTargetNotFoundError",
		Description:    "The remote target does not exist",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrReplicationRemoteConnectionError: {
		Code:           "XMinioAdminReplicationRemoteConnectionError",
		Description:    "Remote service connection error - please check remote service credentials and target bucket",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrBucketRemoteIdenticalToSource: {
		Code:           "XMinioAdminRemoteIdenticalToSource",
		Description:    "The remote target cannot be identical to source",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBucketRemoteAlreadyExists: {
		Code:           "XMinioAdminBucketRemoteAlreadyExists",
		Description:    "The remote target already exists",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBucketRemoteLabelInUse: {
		Code:           "XMinioAdminBucketRemoteLabelInUse",
		Description:    "The remote target with this label already exists",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBucketRemoteRemoveDisallowed: {
		Code:           "XMinioAdminRemoteRemoveDisallowed",
		Description:    "This ARN is in use by an existing configuration",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBucketRemoteArnTypeInvalid: {
		Code:           "XMinioAdminRemoteARNTypeInvalid",
		Description:    "The bucket remote ARN type is not valid",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBucketRemoteArnInvalid: {
		Code:           "XMinioAdminRemoteArnInvalid",
		Description:    "The bucket remote ARN does not have correct format",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrRemoteTargetNotVersionedError: {
		Code:           "RemoteTargetNotVersionedError",
		Description:    "The remote target does not have versioning enabled",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrReplicationSourceNotVersionedError: {
		Code:           "ReplicationSourceNotVersionedError",
		Description:    "The replication source does not have versioning enabled",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrReplicationNeedsVersioningError: {
		Code:           "InvalidRequest",
		Description:    "Versioning must be 'Enabled' on the bucket to apply a replication configuration",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrReplicationBucketNeedsVersioningError: {
		Code:           "InvalidRequest",
		Description:    "Versioning must be 'Enabled' on the bucket to add a replication target",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBucketReplicationDisabledError: {
		Code:           "XMinioAdminBucketReplicationDisabled",
		Description:    "Replication specified but disk usage crawl is disabled on MinIO server",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrNoSuchObjectLockConfiguration: {
		Code:           "NoSuchObjectLockConfiguration",
		Description:    "The specified object does not have a ObjectLock configuration",
@@ -792,6 +924,11 @@ var errorCodes = errorCodeMap{
		Description:    "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrObjectRestoreAlreadyInProgress: {
		Code:           "RestoreAlreadyInProgress",
		Description:    "Object restore is already in progress",
		HTTPStatusCode: http.StatusConflict,
	},
	/// Bucket notification related errors.
	ErrEventNotification: {
		Code:           "InvalidArgument",
@@ -854,7 +991,7 @@ var errorCodes = errorCodeMap{
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMetadataTooLarge: {
		Code:           "InvalidArgument",
		Code:           "MetadataTooLarge",
		Description:    "Your metadata headers exceed the maximum allowed metadata size.",
		HTTPStatusCode: http.StatusBadRequest,
	},
@@ -1068,15 +1205,40 @@ var errorCodes = errorCodeMap{
		Description:    "Credentials in config mismatch with server environment variables",
		HTTPStatusCode: http.StatusServiceUnavailable,
	},
	ErrAdminBucketQuotaExceeded: {
		Code:           "XMinioAdminBucketQuotaExceeded",
		Description:    "Bucket quota exceeded",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminNoSuchQuotaConfiguration: {
		Code:           "XMinioAdminNoSuchQuotaConfiguration",
		Description:    "The quota configuration does not exist",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrAdminBucketQuotaDisabled: {
		Code:           "XMinioAdminBucketQuotaDisabled",
		Description:    "Quota specified but disk usage crawl is disabled on MinIO server",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInsecureClientRequest: {
		Code:           "XMinioInsecureClientRequest",
		Description:    "Cannot respond to plain-text request from TLS-encrypted server",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrOperationTimedOut: {
		Code:           "XMinioServerTimedOut",
		Description:    "A timeout occurred while trying to lock a resource",
		HTTPStatusCode: http.StatusRequestTimeout,
		Code:           "RequestTimeout",
		Description:    "A timeout occurred while trying to lock a resource, please reduce your request rate",
		HTTPStatusCode: http.StatusServiceUnavailable,
	},
	ErrClientDisconnected: {
		Code:           "ClientDisconnected",
		Description:    "Client disconnected before response was ready",
		HTTPStatusCode: 499, // No official code, use nginx value.
	},
	ErrOperationMaxedOut: {
		Code:           "SlowDown",
		Description:    "A timeout exceeded while waiting to proceed with the request, please reduce your request rate",
		HTTPStatusCode: http.StatusServiceUnavailable,
	},
	ErrUnsupportedMetadata: {
		Code:           "InvalidArgument",
@@ -1571,7 +1733,22 @@ var errorCodes = errorCodeMap{
	ErrAddUserInvalidArgument: {
		Code:           "XMinioInvalidIAMCredentials",
		Description:    "User is not allowed to be same as admin access key",
		HTTPStatusCode: http.StatusConflict,
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrAdminAccountNotEligible: {
		Code:           "XMinioInvalidIAMCredentials",
		Description:    "The administrator key is not eligible for this operation",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrAccountNotEligible: {
		Code:           "XMinioInvalidIAMCredentials",
		Description:    "The account key is not eligible for this operation",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrServiceAccountNotFound: {
		Code:           "XMinioInvalidIAMCredentials",
		Description:    "The specified service account is not found",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrPostPolicyConditionInvalidFormat: {
		Code:           "PostPolicyInvalidKeyName",
@@ -1588,7 +1765,17 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
	if err == nil {
		return ErrNone
	}
	// Verify if the underlying error is signature mismatch.

	// Only return ErrClientDisconnected if the provided context is actually canceled.
	// This way downstream context.Canceled will still report ErrOperationTimedOut
	select {
	case <-ctx.Done():
		if ctx.Err() == context.Canceled {
			return ErrClientDisconnected
		}
	default:
	}

	switch err {
	case errInvalidArgument:
		apiErr = ErrAdminInvalidArgument
@@ -1641,7 +1828,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrKMSNotConfigured
	case crypto.ErrKMSAuthLogin:
		apiErr = ErrKMSAuthFailure
	case errOperationTimedOut, context.Canceled, context.DeadlineExceeded:
	case context.Canceled, context.DeadlineExceeded:
		apiErr = ErrOperationTimedOut
	case errDiskNotFound:
		apiErr = ErrSlowDown
@@ -1703,6 +1890,12 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrBucketAlreadyOwnedByYou
	case ObjectNotFound:
		apiErr = ErrNoSuchKey
	case MethodNotAllowed:
		apiErr = ErrMethodNotAllowed
	case InvalidVersionID:
		apiErr = ErrInvalidVersionID
	case VersionNotFound:
		apiErr = ErrNoSuchVersion
	case ObjectAlreadyExists:
		apiErr = ErrMethodNotAllowed
	case ObjectNameInvalid:
@@ -1747,6 +1940,38 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrNoSuchLifecycleConfiguration
	case BucketSSEConfigNotFound:
		apiErr = ErrNoSuchBucketSSEConfig
	case BucketTaggingNotFound:
		apiErr = ErrBucketTaggingNotFound
	case BucketObjectLockConfigNotFound:
		apiErr = ErrObjectLockConfigurationNotFound
	case BucketQuotaConfigNotFound:
		apiErr = ErrAdminNoSuchQuotaConfiguration
	case BucketReplicationConfigNotFound:
		apiErr = ErrReplicationConfigurationNotFoundError
	case BucketRemoteDestinationNotFound:
		apiErr = ErrRemoteDestinationNotFoundError
	case BucketReplicationDestinationMissingLock:
		apiErr = ErrReplicationDestinationMissingLock
	case BucketRemoteTargetNotFound:
		apiErr = ErrRemoteTargetNotFoundError
	case BucketRemoteConnectionErr:
		apiErr = ErrReplicationRemoteConnectionError
	case BucketRemoteAlreadyExists:
		apiErr = ErrBucketRemoteAlreadyExists
	case BucketRemoteLabelInUse:
		apiErr = ErrBucketRemoteLabelInUse
	case BucketRemoteArnTypeInvalid:
		apiErr = ErrBucketRemoteArnTypeInvalid
	case BucketRemoteArnInvalid:
		apiErr = ErrBucketRemoteArnInvalid
	case BucketRemoteRemoveDisallowed:
		apiErr = ErrBucketRemoteRemoveDisallowed
	case BucketRemoteTargetNotVersioned:
		apiErr = ErrRemoteTargetNotVersionedError
	case BucketReplicationSourceNotVersioned:
		apiErr = ErrReplicationSourceNotVersionedError
	case BucketQuotaExceeded:
		apiErr = ErrAdminBucketQuotaExceeded
	case *event.ErrInvalidEventName:
		apiErr = ErrEventNotification
	case *event.ErrInvalidARN:
@@ -1769,10 +1994,14 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrOverlappingFilterNotification
	case *event.ErrUnsupportedConfiguration:
		apiErr = ErrUnsupportedNotification
	case OperationTimedOut:
		apiErr = ErrOperationTimedOut
	case BackendDown:
		apiErr = ErrBackendDown
	case ObjectNameTooLong:
		apiErr = ErrKeyTooLongError
	case dns.ErrInvalidBucketName:
		apiErr = ErrInvalidBucketName
	default:
		var ie, iw int
		// This work-around is to handle the issue golang/go#30648
@@ -1809,12 +2038,24 @@ func toAPIError(ctx context.Context, err error) APIError {
	}

	var apiErr = errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
	e, ok := err.(dns.ErrInvalidBucketName)
	if ok {
		code := toAPIErrorCode(ctx, e)
		apiErr = errorCodes.ToAPIErrWithErr(code, e)
	}

	if apiErr.Code == "InternalError" {
		// If we see an internal error try to interpret
		// any underlying errors if possible depending on
		// their internal error types. This code is only
		// useful with gateway implementations.
		switch e := err.(type) {
		case InvalidArgument:
			apiErr = APIError{
				Code:           "InvalidArgument",
				Description:    e.Error(),
				HTTPStatusCode: errorCodes[ErrInvalidRequest].HTTPStatusCode,
			}
		case *xml.SyntaxError:
			apiErr = APIError{
				Code:           "MalformedXML",
@@ -1829,13 +2070,25 @@ func toAPIError(ctx context.Context, err error) APIError {
					e.Error()),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case versioning.Error:
			apiErr = APIError{
				Code:           "IllegalVersioningConfigurationException",
				Description:    fmt.Sprintf("Versioning configuration specified in the request is invalid. (%s)", e.Error()),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case lifecycle.Error:
			apiErr = APIError{
				Code:           "InvalidRequest",
				Description:    e.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case tagging.Error:
		case replication.Error:
			apiErr = APIError{
				Code:           "MalformedXML",
				Description:    e.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case tags.Error:
			apiErr = APIError{
				Code:           e.Code(),
				Description:    e.Error(),
@@ -1877,12 +2130,6 @@ func toAPIError(ctx context.Context, err error) APIError {
				Description:    e.Error(),
				HTTPStatusCode: e.Response().StatusCode,
			}
		case oss.ServiceError:
			apiErr = APIError{
				Code:           e.Code,
				Description:    e.Message,
				HTTPStatusCode: e.StatusCode,
			}
		// Add more Gateway SDKs here if any in future.
		}
	}

@@ -24,10 +24,12 @@ import (
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/minio/minio/cmd/crypto"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/pkg/bucket/lifecycle"
)

// Returns a hexadecimal representation of time at the
@@ -36,9 +38,18 @@ func mustGetRequestID(t time.Time) string {
	return fmt.Sprintf("%X", t.UnixNano())
}

// setEventStreamHeaders to allow proxies to avoid buffering proxy responses
func setEventStreamHeaders(w http.ResponseWriter) {
	w.Header().Set(xhttp.ContentType, "text/event-stream")
	w.Header().Set(xhttp.CacheControl, "no-cache") // nginx to turn off buffering
	w.Header().Set("X-Accel-Buffering", "no")      // nginx to turn off buffering
}
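
For context on the header trio above: Content-Type marks the response as an event stream, while Cache-Control and X-Accel-Buffering tell nginx-style proxies to forward each event immediately instead of buffering. A minimal, self-contained handler using the same headers follows; the path, handler name, and tick loop are illustrative and not part of this changeset.

package main

import (
	"fmt"
	"net/http"
	"time"
)

// streamEvents sets the same three headers as setEventStreamHeaders and
// then pushes a few server-sent events, flushing after each one.
func streamEvents(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("X-Accel-Buffering", "no")

	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "data: tick %d\n\n", i)
		flusher.Flush() // deliver the event without waiting for the handler to return
		time.Sleep(time.Second)
	}
}

func main() {
	http.HandleFunc("/events", streamEvents)
	fmt.Println(http.ListenAndServe(":8080", nil))
}
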

// Write http common headers
func setCommonHeaders(w http.ResponseWriter) {
	w.Header().Set(xhttp.ServerInfo, "MinIO/"+ReleaseTag)
	// Set the "Server" http header.
	w.Header().Set(xhttp.ServerInfo, "MinIO")

	// Set `x-amz-bucket-region` only if region is set on the server
	// by default minio uses an empty region.
	if region := globalServerRegion; region != "" {
@@ -67,8 +78,15 @@ func encodeResponseJSON(response interface{}) []byte {
	return bytesBuffer.Bytes()
}

// Write parts count
func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
	if strings.Contains(objInfo.ETag, "-") && len(objInfo.Parts) > 0 {
		w.Header()[xhttp.AmzMpPartsCount] = []string{strconv.Itoa(len(objInfo.Parts))}
	}
}

// Write object header
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec) (err error) {
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
	// set common headers
	setCommonHeaders(w)

@@ -92,46 +110,58 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
	if !objInfo.Expires.IsZero() {
		w.Header().Set(xhttp.Expires, objInfo.Expires.UTC().Format(http.TimeFormat))
	}

	if globalCacheConfig.Enabled {
		w.Header().Set(xhttp.XCache, objInfo.CacheStatus.String())
		w.Header().Set(xhttp.XCacheLookup, objInfo.CacheLookupStatus.String())
	}

	// Set tag count if object has tags
	tags, _ := url.ParseQuery(objInfo.UserTags)
	tagCount := len(tags)
	if tagCount != 0 {
		w.Header().Set(xhttp.AmzTagCount, strconv.Itoa(tagCount))
	if len(objInfo.UserTags) > 0 {
		tags, _ := url.ParseQuery(objInfo.UserTags)
		if len(tags) > 0 {
			w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(len(tags))}
		}
	}

	// Set all other user defined metadata.
	for k, v := range objInfo.UserDefined {
		if HasPrefix(k, ReservedMetadataPrefix) {
		if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
			// Do not need to send any internal metadata
			// values to client.
			continue
		}
		w.Header().Set(k, v)

		// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
		if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) {
			continue
		}
		var isSet bool
		for _, userMetadataPrefix := range userMetadataKeyPrefixes {
			if !strings.HasPrefix(k, userMetadataPrefix) {
				continue
			}
			w.Header()[strings.ToLower(k)] = []string{v}
			isSet = true
			break
		}
		if !isSet {
			w.Header().Set(k, v)
		}
	}

	var totalObjectSize int64
	switch {
	case crypto.IsEncrypted(objInfo.UserDefined):
		totalObjectSize, err = objInfo.DecryptedSize()
		if err != nil {
			return err
		}
	case objInfo.IsCompressed():
		totalObjectSize = objInfo.GetActualSize()
		if totalObjectSize < 0 {
			return errInvalidDecompressedSize
		}
	default:
		totalObjectSize = objInfo.Size
	var start, rangeLen int64
	totalObjectSize, err := objInfo.GetActualSize()
	if err != nil {
		return err
	}

	// for providing ranged content
	start, rangeLen, err := rs.GetOffsetLength(totalObjectSize)
	if opts.PartNumber > 0 {
		rs = partNumberToRangeSpec(objInfo, opts.PartNumber)
	}

	// For providing ranged content
	start, rangeLen, err = rs.GetOffsetLength(totalObjectSize)
	if err != nil {
		return err
	}
@@ -143,5 +173,31 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
		w.Header().Set(xhttp.ContentRange, contentRange)
	}

	// Set the relevant version ID as part of the response header.
	if objInfo.VersionID != "" {
		w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
	}
	if objInfo.ReplicationStatus.String() != "" {
		w.Header()[xhttp.AmzBucketReplicationStatus] = []string{objInfo.ReplicationStatus.String()}
	}
	if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {
		ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{
			Name:         objInfo.Name,
			UserTags:     objInfo.UserTags,
			VersionID:    objInfo.VersionID,
			ModTime:      objInfo.ModTime,
			IsLatest:     objInfo.IsLatest,
			DeleteMarker: objInfo.DeleteMarker,
		})
		if !expiryTime.IsZero() {
			w.Header()[xhttp.AmzExpiration] = []string{
				fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID),
			}
		}
		if objInfo.TransitionStatus == lifecycle.TransitionComplete {
			w.Header()[xhttp.AmzStorageClass] = []string{objInfo.StorageClass}
		}
	}

	return nil
}

@@ -20,6 +20,7 @@ import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
@@ -33,10 +34,12 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
timeFormatAMZLong = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
|
||||
maxObjectList = 10000 // Limit number of objects in a listObjectsResponse.
|
||||
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
|
||||
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
|
||||
// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
|
||||
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
|
||||
maxObjectList = metacacheBlockSize - (metacacheBlockSize / 10) // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
|
||||
maxDeleteList = 10000 // Limit number of objects deleted in a delete call.
|
||||
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
|
||||
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
|
||||
)
|
||||
|
||||
// LocationResponse - format for location response.
|
||||
@@ -233,10 +236,23 @@ type Bucket struct {
|
||||
|
||||
// ObjectVersion container for object version metadata
|
||||
type ObjectVersion struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Version" json:"-"`
|
||||
Object
|
||||
VersionID string `xml:"VersionId"`
|
||||
IsLatest bool
|
||||
VersionID string `xml:"VersionId"`
|
||||
|
||||
isDeleteMarker bool
|
||||
}
|
||||
|
||||
// MarshalXML - marshal ObjectVersion
|
||||
func (o ObjectVersion) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
if o.isDeleteMarker {
|
||||
start.Name.Local = "DeleteMarker"
|
||||
} else {
|
||||
start.Name.Local = "Version"
|
||||
}
|
||||
|
||||
type objectVersionWrapper ObjectVersion
|
||||
return e.EncodeElement(objectVersionWrapper(o), start)
|
||||
}
|
||||

// StringMap is a map[string]string.
@@ -331,9 +347,10 @@ type CompleteMultipartUploadResponse struct {

// DeleteError structure.
type DeleteError struct {
Code string
Message string
Key string
Code string
Message string
Key string
VersionID string `xml:"VersionId"`
}

// DeleteObjectsResponse container for multiple object deletes.
@@ -341,7 +358,7 @@ type DeleteObjectsResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"`

// Collection of all deleted objects
DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"`
DeletedObjects []DeletedObject `xml:"Deleted,omitempty"`

// Collection of errors deleting certain objects.
Errors []DeleteError `xml:"Error,omitempty"`
@@ -380,8 +397,7 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
}
// If domain is set then we need to use bucket DNS style.
for _, domain := range domains {
if strings.Contains(r.Host, domain) {
u.Host = bucket + "." + r.Host
if strings.HasPrefix(r.Host, bucket+"."+domain) {
u.Path = path.Join(SlashSeparator, object)
break
}
@@ -392,7 +408,7 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
// generates ListBucketsResponse from an array of BucketInfo which can be
// serialized to match XML and JSON API spec output.
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
var listbuckets []Bucket
listbuckets := make([]Bucket, 0, len(buckets))
var data = ListBucketsResponse{}
var owner = Owner{}

@@ -400,7 +416,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
for _, bucket := range buckets {
var listbucket = Bucket{}
listbucket.Name = bucket.Name
listbucket.CreationDate = bucket.Created.UTC().Format(timeFormatAMZLong)
listbucket.CreationDate = bucket.Created.UTC().Format(iso8601TimeFormat)
listbuckets = append(listbuckets, listbucket)
}

@@ -411,9 +427,8 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
}

// generates a ListBucketVersions response for the given bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListVersionsResponse {
var versions []ObjectVersion
var prefixes []CommonPrefix
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
versions := make([]ObjectVersion, 0, len(resp.Objects))
var owner = Owner{}
var data = ListVersionsResponse{}

@@ -424,20 +439,28 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp
continue
}
content.Key = s3EncodeName(object.Name, encodingType)
content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
if object.ETag != "" {
content.ETag = "\"" + object.ETag + "\""
}
content.Size = object.Size
content.StorageClass = object.StorageClass
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
content.Owner = owner
content.VersionID = "null"
content.IsLatest = true
content.VersionID = object.VersionID
if content.VersionID == "" {
content.VersionID = nullVersionID
}
content.IsLatest = object.IsLatest
content.isDeleteMarker = object.DeleteMarker
versions = append(versions, content)
}

data.Name = bucket
data.Versions = versions

data.EncodingType = encodingType
data.Prefix = s3EncodeName(prefix, encodingType)
data.KeyMarker = s3EncodeName(marker, encodingType)
@@ -445,8 +468,11 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp
data.MaxKeys = maxKeys

data.NextKeyMarker = s3EncodeName(resp.NextMarker, encodingType)
data.NextVersionIDMarker = resp.NextVersionIDMarker
data.VersionIDMarker = versionIDMarker
data.IsTruncated = resp.IsTruncated

prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -458,8 +484,7 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp

// generates a ListObjectsV1 response for the given bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
var contents []Object
var prefixes []CommonPrefix
contents := make([]Object, 0, len(resp.Objects))
var owner = Owner{}
var data = ListObjectsResponse{}

@@ -470,12 +495,16 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
continue
}
content.Key = s3EncodeName(object.Name, encodingType)
content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
if object.ETag != "" {
content.ETag = "\"" + object.ETag + "\""
}
content.Size = object.Size
content.StorageClass = object.StorageClass
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
content.Owner = owner
contents = append(contents, content)
}
@@ -487,9 +516,10 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
data.Marker = s3EncodeName(marker, encodingType)
data.Delimiter = s3EncodeName(delimiter, encodingType)
data.MaxKeys = maxKeys

data.NextMarker = s3EncodeName(resp.NextMarker, encodingType)
data.IsTruncated = resp.IsTruncated

prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -501,8 +531,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy

// generates a ListObjectsV2 response for the given bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
var contents []Object
var commonPrefixes []CommonPrefix
contents := make([]Object, 0, len(objects))
var owner = Owner{}
var data = ListObjectsV2Response{}

@@ -516,21 +545,29 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
continue
}
content.Key = s3EncodeName(object.Name, encodingType)
content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
if object.ETag != "" {
content.ETag = "\"" + object.ETag + "\""
}
content.Size = object.Size
content.StorageClass = object.StorageClass
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
content.Owner = owner
if metadata {
content.UserMetadata = make(StringMap)
for k, v := range CleanMinioInternalMetadataKeys(object.UserDefined) {
if HasPrefix(k, ReservedMetadataPrefix) {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
// Do not need to send any internal metadata
// values to client.
continue
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
content.UserMetadata[k] = v
}
}
@@ -547,6 +584,8 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token))
data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken))
data.IsTruncated = isTruncated

commonPrefixes := make([]CommonPrefix, 0, len(prefixes))
for _, prefix := range prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -561,7 +600,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
func generateCopyObjectResponse(etag string, lastModified time.Time) CopyObjectResponse {
return CopyObjectResponse{
ETag: "\"" + etag + "\"",
LastModified: lastModified.UTC().Format(timeFormatAMZLong),
LastModified: lastModified.UTC().Format(iso8601TimeFormat),
}
}

@@ -569,7 +608,7 @@ func generateCopyObjectPartResponse(etag string, lastModified time.Time) CopyObjectR
func generateCopyObjectPartResponse(etag string, lastModified time.Time) CopyObjectPartResponse {
return CopyObjectPartResponse{
ETag: "\"" + etag + "\"",
LastModified: lastModified.UTC().Format(timeFormatAMZLong),
LastModified: lastModified.UTC().Format(iso8601TimeFormat),
}
}

@@ -588,7 +627,8 @@ func generateCompleteMultpartUploadResponse(bucket, key, location, etag string)
Location: location,
Bucket: bucket,
Key: key,
ETag: etag,
// AWS S3 quotes the ETag in XML, make sure we are compatible here.
ETag: "\"" + etag + "\"",
}
}

@@ -613,7 +653,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
newPart.PartNumber = part.PartNumber
newPart.ETag = "\"" + part.ETag + "\""
newPart.Size = part.Size
newPart.LastModified = part.LastModified.UTC().Format(timeFormatAMZLong)
newPart.LastModified = part.LastModified.UTC().Format(iso8601TimeFormat)
listPartsResponse.Parts[index] = newPart
}
return listPartsResponse
@@ -643,18 +683,21 @@ func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMult
newUpload := Upload{}
newUpload.UploadID = upload.UploadID
newUpload.Key = s3EncodeName(upload.Object, encodingType)
newUpload.Initiated = upload.Initiated.UTC().Format(timeFormatAMZLong)
newUpload.Initiated = upload.Initiated.UTC().Format(iso8601TimeFormat)
listMultipartUploadsResponse.Uploads[index] = newUpload
}
return listMultipartUploadsResponse
}

// generates a multi-object delete response.
func generateMultiDeleteResponse(quiet bool, deletedObjects []ObjectIdentifier, errs []DeleteError) DeleteObjectsResponse {
func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, errs []DeleteError) DeleteObjectsResponse {
deleteResp := DeleteObjectsResponse{}
if !quiet {
deleteResp.DeletedObjects = deletedObjects
}
if len(errs) == len(deletedObjects) {
deleteResp.DeletedObjects = nil
}
deleteResp.Errors = errs
return deleteResp
}
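The quiet flag mirrors S3's DeleteObjects quiet mode: successes are suppressed and only errors are reported, and when every requested object errored the Deleted list is dropped entirely. A hedged sketch of that behavior with simplified stand-in types:

package main

import "fmt"

type DeletedObject struct{ ObjectName string }
type DeleteError struct{ Code, Message, Key string }

type DeleteObjectsResponse struct {
	DeletedObjects []DeletedObject
	Errors         []DeleteError
}

func multiDeleteResponse(quiet bool, deleted []DeletedObject, errs []DeleteError) DeleteObjectsResponse {
	resp := DeleteObjectsResponse{Errors: errs}
	// Report successes only in verbose mode, and only if at least one delete succeeded.
	if !quiet && len(errs) != len(deleted) {
		resp.DeletedObjects = deleted
	}
	return resp
}

func main() {
	verbose := multiDeleteResponse(false, []DeletedObject{{"a.txt"}}, nil)
	quiet := multiDeleteResponse(true, []DeletedObject{{"a.txt"}}, nil)
	fmt.Printf("verbose: %+v\nquiet: %+v\n", verbose, quiet)
}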
@@ -718,6 +761,10 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
// Set the Retry-After header to indicate user agents should retry the request after 120 seconds.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
case "InvalidRegion":
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalServerRegion)
case "AuthorizationHeaderMalformed":
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalServerRegion)
case "AccessDenied":
// If the request is from a browser and the browser
// is enabled, we need to redirect.
@@ -753,17 +800,6 @@ func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIE
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}

// writeVersionMismatchResponse - writes custom error responses for version mismatches.
func writeVersionMismatchResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL, isJSON bool) {
if isJSON {
// Generate error response.
errorResponse := getAPIErrorResponse(ctx, err, reqURL.String(), w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
writeResponse(w, err.HTTPStatusCode, encodeResponseJSON(errorResponse), mimeJSON)
} else {
writeResponse(w, err.HTTPStatusCode, []byte(err.Description), mimeNone)
}
}

// writeCustomErrorResponseJSON - similar to writeErrorResponseJSON,
// but accepts the error message directly (this allows messages to be
// dynamically generated).
@@ -783,38 +819,3 @@ func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, er
encodedErrorResponse := encodeResponseJSON(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}

// writeCustomErrorResponseXML - similar to writeErrorResponse,
// but accepts the error message directly (this allows messages to be
// dynamically generated).
func writeCustomErrorResponseXML(ctx context.Context, w http.ResponseWriter, err APIError, errBody string, reqURL *url.URL, browser bool) {

switch err.Code {
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
// Set the Retry-After header to indicate user agents should retry the request after 120 seconds.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
case "AccessDenied":
// If the request is from a browser and the browser
// is enabled, we need to redirect.
if browser && globalBrowserEnabled {
w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
w.WriteHeader(http.StatusTemporaryRedirect)
return
}
}

reqInfo := logger.GetReqInfo(ctx)
errorResponse := APIErrorResponse{
Code: err.Code,
Message: errBody,
Resource: reqURL.Path,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
RequestID: w.Header().Get(xhttp.AmzRequestID),
HostID: globalDeploymentID,
}

encodedErrorResponse := encodeResponse(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
}

@@ -77,7 +77,7 @@ func TestObjectLocation(t *testing.T) {
// Server with virtual domain name.
{
request: &http.Request{
Host: "mys3.bucket.org",
Host: "mybucket.mys3.bucket.org",
Header: map[string][]string{},
},
domains: []string{"mys3.bucket.org"},
@@ -87,7 +87,7 @@ func TestObjectLocation(t *testing.T) {
},
{
request: &http.Request{
Host: "mys3.bucket.org",
Host: "mybucket.mys3.bucket.org",
Header: map[string][]string{
"X-Forwarded-Scheme": {httpsScheme},
},
@@ -98,11 +98,14 @@ func TestObjectLocation(t *testing.T) {
expectedLocation: "https://mybucket.mys3.bucket.org/test/1.txt",
},
}
for i, testCase := range testCases {
gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
if testCase.expectedLocation != gotLocation {
t.Errorf("Test %d: expected %s, got %s", i+1, testCase.expectedLocation, gotLocation)
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
if testCase.expectedLocation != gotLocation {
t.Errorf("expected %s, got %s", testCase.expectedLocation, gotLocation)
}
})
}
}
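The testCase := testCase shadow copy in the rewritten loop is the standard guard against the pre-Go 1.22 loop-variable capture pitfall; it is harmless here and becomes essential once a subtest calls t.Parallel(). A minimal demonstration of the pitfall outside the test:

package main

import "fmt"

func main() {
	var funcs []func()
	for _, v := range []int{1, 2, 3} {
		v := v // shadow copy, as in the test above; without it (pre-Go 1.22) every closure prints 3
		funcs = append(funcs, func() { fmt.Println(v) })
	}
	for _, f := range funcs {
		f() // prints 1, 2, 3
	}
}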


@@ -1,5 +1,5 @@
/*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc.
* MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,10 +17,13 @@
package cmd

import (
"net"
"net/http"

"github.com/gorilla/mux"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/pkg/wildcard"
"github.com/rs/cors"
)

func newHTTPServerFn() *xhttp.Server {
@@ -29,203 +32,351 @@ func newHTTPServerFn() *xhttp.Server {
return globalHTTPServer
}

func newObjectLayerWithoutSafeModeFn() ObjectLayer {
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()
return globalObjectAPI
}

func newObjectLayerFn() ObjectLayer {
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()
if globalSafeMode {
return nil
}
return globalObjectAPI
}

func newCachedObjectLayerFn() CacheObjectLayer {
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()

if globalSafeMode {
return nil
}
return globalCacheObjectAPI
}

func setObjectLayer(o ObjectLayer) {
globalObjLayerMutex.Lock()
globalObjectAPI = o
globalObjLayerMutex.Unlock()
}

// objectAPIHandler implements and provides http handlers for S3 API.
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
CacheAPI func() CacheObjectLayer
// Returns true if handlers should interpret encryption.
EncryptionEnabled func() bool
// Returns true if handlers allow SSE-KMS encryption headers.
AllowSSEKMS func() bool
}

// getHost tries its best to return the request host.
// According to section 14.23 of RFC 2616 the Host header
// can include the port number if the default value of 80 is not used.
func getHost(r *http.Request) string {
if r.URL.IsAbs() {
return r.URL.Host
}
return r.Host
}
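A quick illustration of getHost's two paths, using a hypothetical host name:

package main

import (
	"fmt"
	"net/http"
)

// getHost as defined above: prefer the absolute request URL's host, else the Host header.
func getHost(r *http.Request) string {
	if r.URL.IsAbs() {
		return r.URL.Host
	}
	return r.Host
}

func main() {
	r, _ := http.NewRequest(http.MethodGet, "http://play.example.net:9000/bucket", nil)
	fmt.Println(getHost(r)) // play.example.net:9000 - taken from the absolute URL

	r.URL.Scheme, r.URL.Host = "", "" // simulate a server-side request carrying only a Host header
	r.Host = "mybucket.example.net"
	fmt.Println(getHost(r)) // mybucket.example.net - taken from the Host header
}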

// registerAPIRouter - registers S3 compatible APIs.
func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool) {
func registerAPIRouter(router *mux.Router) {
// Initialize API.
api := objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
CacheAPI: newCachedObjectLayerFn,
EncryptionEnabled: func() bool {
return encryptionEnabled
},
AllowSSEKMS: func() bool {
return allowSSEKMS
},
}

// API Router
apiRouter := router.PathPrefix(SlashSeparator).Subrouter()

var routers []*mux.Router
for _, domainName := range globalDomainNames {
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName+":{port:.*}").Subrouter())
if IsKubernetes() {
routers = append(routers, apiRouter.MatcherFunc(func(r *http.Request, match *mux.RouteMatch) bool {
host, _, err := net.SplitHostPort(getHost(r))
if err != nil {
host = r.Host
}
// Make sure to skip matching `minio.<domain>`; this is
// specifically meant for operator/k8s deployments.
// The reason we need to skip this is a special
// use case where we need to make sure that
// minio.<namespace>.svc.<cluster_domain> is ignored
// by bucket-DNS-style routing, to ensure that path-style
// requests remain available and honored at this domain.
//
// All other `<bucket>.<namespace>.svc.<cluster_domain>`
// hosts are routed through this matcher
// to match for `<bucket>`.
return host != minioReservedBucket+"."+domainName
}).Host("{bucket:.+}."+domainName).Subrouter())
} else {
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
}
}
routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())
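For context, gorilla/mux extracts the bucket name from the Host header when bucket-DNS routing is configured; a self-contained sketch with a hypothetical domain:

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// Same pattern as above: the subdomain becomes the {bucket} route variable.
	r.Host("{bucket:.+}.mys3.example.net").Path("/{object:.+}").HandlerFunc(
		func(w http.ResponseWriter, req *http.Request) {
			vars := mux.Vars(req)
			fmt.Fprintf(w, "bucket=%s object=%s\n", vars["bucket"], vars["object"])
		})

	req, _ := http.NewRequest(http.MethodGet, "http://mybucket.mys3.example.net/test/1.txt", nil)
	var match mux.RouteMatch
	fmt.Println(r.Match(req, &match)) // true - routed by host, not by path prefix
}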

for _, bucket := range routers {
// Object operations
// HeadObject
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler)))
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler))))
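Every route below is now wrapped in maxClients, a concurrency limiter. The real implementation lives elsewhere in this changeset; what follows is only a hedged sketch of the usual shape of such a middleware, a buffered-channel semaphore that sheds load once the cap is reached:

package main

import "net/http"

// maxClientsSketch caps concurrent in-flight requests; hypothetical stand-in for the real maxClients.
func maxClientsSketch(limit int, f http.HandlerFunc) http.HandlerFunc {
	sem := make(chan struct{}, limit)
	return func(w http.ResponseWriter, r *http.Request) {
		select {
		case sem <- struct{}{}:
			defer func() { <-sem }()
			f(w, r)
		default:
			// Over capacity: tell clients to back off rather than queueing without bound.
			w.Header().Set("Retry-After", "120")
			http.Error(w, "too many concurrent requests", http.StatusServiceUnavailable)
		}
	}
}

func main() {
	http.Handle("/", maxClientsSketch(100, func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	}))
	http.ListenAndServe(":8080", nil)
}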
// CopyObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).
Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// ListObjectParts
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("listobjectparts", httpTraceAll(api.ListObjectPartsHandler))).Queries("uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("listobjectparts", httpTraceAll(api.ListObjectPartsHandler)))).Queries("uploadId", "{uploadId:.*}")
// CompleteMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("completemutipartupload", httpTraceAll(api.CompleteMultipartUploadHandler))).Queries("uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("completemutipartupload", httpTraceAll(api.CompleteMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("newmultipartupload", httpTraceAll(api.NewMultipartUploadHandler))).Queries("uploads", "")
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("newmultipartupload", httpTraceAll(api.NewMultipartUploadHandler)))).Queries("uploads", "")
// AbortMultipartUpload
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(collectAPIStats("abortmultipartupload", httpTraceAll(api.AbortMultipartUploadHandler))).Queries("uploadId", "{uploadId:.*}")
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("abortmultipartupload", httpTraceAll(api.AbortMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
// GetObjectACL - this is a dummy call.
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectacl", httpTraceHdrs(api.GetObjectACLHandler))).Queries("acl", "")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectacl", httpTraceHdrs(api.GetObjectACLHandler)))).Queries("acl", "")
// PutObjectACL - this is a dummy call.
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectacl", httpTraceHdrs(api.PutObjectACLHandler))).Queries("acl", "")
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectacl", httpTraceHdrs(api.PutObjectACLHandler)))).Queries("acl", "")
// GetObjectTagging
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler))).Queries("tagging", "")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler)))).Queries("tagging", "")
// PutObjectTagging
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjecttagging", httpTraceHdrs(api.PutObjectTaggingHandler))).Queries("tagging", "")
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjecttagging", httpTraceHdrs(api.PutObjectTaggingHandler)))).Queries("tagging", "")
// DeleteObjectTagging
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(collectAPIStats("deleteobjecttagging", httpTraceHdrs(api.DeleteObjectTaggingHandler))).Queries("tagging", "")
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("deleteobjecttagging", httpTraceHdrs(api.DeleteObjectTaggingHandler)))).Queries("tagging", "")
// SelectObjectContent
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler))).Queries("select", "").Queries("select-type", "2")
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler)))).Queries("select", "").Queries("select-type", "2")
// GetObjectRetention
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectretention", httpTraceAll(api.GetObjectRetentionHandler))).Queries("retention", "")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectretention", httpTraceAll(api.GetObjectRetentionHandler)))).Queries("retention", "")
// GetObjectLegalHold
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectlegalhold", httpTraceAll(api.GetObjectLegalHoldHandler))).Queries("legal-hold", "")
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectlegalhold", httpTraceAll(api.GetObjectLegalHoldHandler)))).Queries("legal-hold", "")
// GetObject
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler)))
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler))))
// CopyObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler)))
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
// PutObjectRetention
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler))).Queries("retention", "")
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
// PutObjectLegalHold
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectlegalhold", httpTraceAll(api.PutObjectLegalHoldHandler))).Queries("legal-hold", "")
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectlegalhold", httpTraceAll(api.PutObjectLegalHoldHandler)))).Queries("legal-hold", "")

// PutObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler)))
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler))))
// DeleteObject
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler)))
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler))))

/// Bucket operations
// GetBucketLocation
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlocation", httpTraceAll(api.GetBucketLocationHandler))).Queries("location", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlocation", httpTraceAll(api.GetBucketLocationHandler)))).Queries("location", "")
// GetBucketPolicy
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler))).Queries("policy", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler)))).Queries("policy", "")
// GetBucketLifecycle
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler))).Queries("lifecycle", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
// GetBucketEncryption
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketencryption", httpTraceAll(api.GetBucketEncryptionHandler))).Queries("encryption", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketencryption", httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")
// GetBucketObjectLockConfig
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// GetBucketReplicationConfig
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketreplicationconfiguration", httpTraceAll(api.GetBucketReplicationConfigHandler)))).Queries("replication", "")

// GetBucketVersioning
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
// GetBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
// ListenNotification
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listennotification", httpTraceAll(api.ListenNotificationHandler))).Queries("events", "{events:.*}")

// Dummy Bucket Calls
// GetBucketACL -- this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketacl", httpTraceAll(api.GetBucketACLHandler))).Queries("acl", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketacl", httpTraceAll(api.GetBucketACLHandler)))).Queries("acl", "")
// PutBucketACL -- this is a dummy call.
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketacl", httpTraceAll(api.PutBucketACLHandler))).Queries("acl", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketacl", httpTraceAll(api.PutBucketACLHandler)))).Queries("acl", "")
// GetBucketCors - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketcors", httpTraceAll(api.GetBucketCorsHandler))).Queries("cors", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketcors", httpTraceAll(api.GetBucketCorsHandler)))).Queries("cors", "")
// GetBucketWebsiteHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketwebsite", httpTraceAll(api.GetBucketWebsiteHandler))).Queries("website", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketwebsite", httpTraceAll(api.GetBucketWebsiteHandler)))).Queries("website", "")
// GetBucketAccelerateHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketaccelerate", httpTraceAll(api.GetBucketAccelerateHandler))).Queries("accelerate", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketaccelerate", httpTraceAll(api.GetBucketAccelerateHandler)))).Queries("accelerate", "")
// GetBucketRequestPaymentHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketrequestpayment", httpTraceAll(api.GetBucketRequestPaymentHandler))).Queries("requestPayment", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketrequestpayment", httpTraceAll(api.GetBucketRequestPaymentHandler)))).Queries("requestPayment", "")
// GetBucketLoggingHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlogging", httpTraceAll(api.GetBucketLoggingHandler))).Queries("logging", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlogging", httpTraceAll(api.GetBucketLoggingHandler)))).Queries("logging", "")
// GetBucketLifecycleHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler))).Queries("lifecycle", "")
// GetBucketReplicationHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketreplication", httpTraceAll(api.GetBucketReplicationHandler))).Queries("replication", "")
// GetBucketTaggingHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler))).Queries("tagging", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
// GetBucketTaggingHandler
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
// DeleteBucketWebsiteHandler
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketwebsite", httpTraceAll(api.DeleteBucketWebsiteHandler))).Queries("website", "")
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketwebsite", httpTraceAll(api.DeleteBucketWebsiteHandler)))).Queries("website", "")
// DeleteBucketTaggingHandler
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler))).Queries("tagging", "")
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")

// GetBucketObjectLockConfig
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler))).Queries("object-lock", "")
// GetBucketVersioning
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler))).Queries("versioning", "")
// GetBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler))).Queries("notification", "")
// ListenBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listenbucketnotification", httpTraceAll(api.ListenBucketNotificationHandler))).Queries("events", "{events:.*}")
// ListMultipartUploads
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler))).Queries("uploads", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")
// ListObjectsV2M
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv2M", httpTraceAll(api.ListObjectsV2MHandler))).Queries("list-type", "2", "metadata", "true")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv2M", httpTraceAll(api.ListObjectsV2MHandler)))).Queries("list-type", "2", "metadata", "true")
// ListObjectsV2
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler))).Queries("list-type", "2")
// ListBucketVersions
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listbucketversions", httpTraceAll(api.ListBucketObjectVersionsHandler))).Queries("versions", "")
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
// ListObjectVersions
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectversions", httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "")
// ListObjectsV1 (Legacy)
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler)))
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler))))
// PutBucketLifecycle
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler))).Queries("lifecycle", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
// PutBucketReplicationConfig
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketreplicationconfiguration", httpTraceAll(api.PutBucketReplicationConfigHandler)))).Queries("replication", "")
// GetObjectRetention

// PutBucketEncryption
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketencryption", httpTraceAll(api.PutBucketEncryptionHandler))).Queries("encryption", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketencryption", httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")

// PutBucketPolicy
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler))).Queries("policy", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler)))).Queries("policy", "")

// PutBucketObjectLockConfig
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler))).Queries("object-lock", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// PutBucketTaggingHandler
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbuckettagging", httpTraceAll(api.PutBucketTaggingHandler)))).Queries("tagging", "")
// PutBucketVersioning
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler))).Queries("versioning", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
// PutBucketNotification
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler))).Queries("notification", "")
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler)))).Queries("notification", "")
// PutBucket
bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucket", httpTraceAll(api.PutBucketHandler)))
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucket", httpTraceAll(api.PutBucketHandler))))
// HeadBucket
bucket.Methods(http.MethodHead).HandlerFunc(collectAPIStats("headbucket", httpTraceAll(api.HeadBucketHandler)))
bucket.Methods(http.MethodHead).HandlerFunc(
maxClients(collectAPIStats("headbucket", httpTraceAll(api.HeadBucketHandler))))
// PostPolicy
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(collectAPIStats("postpolicybucket", httpTraceHdrs(api.PostPolicyBucketHandler)))
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(
maxClients(collectAPIStats("postpolicybucket", httpTraceHdrs(api.PostPolicyBucketHandler))))
// DeleteMultipleObjects
bucket.Methods(http.MethodPost).HandlerFunc(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler))).Queries("delete", "")
bucket.Methods(http.MethodPost).HandlerFunc(
maxClients(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler)))).Queries("delete", "")
// DeleteBucketPolicy
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler))).Queries("policy", "")
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
// DeleteBucketReplication
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketreplicationconfiguration", httpTraceAll(api.DeleteBucketReplicationConfigHandler)))).Queries("replication", "")
// DeleteBucketLifecycle
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler))).Queries("lifecycle", "")
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
// DeleteBucketEncryption
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketencryption", httpTraceAll(api.DeleteBucketEncryptionHandler))).Queries("encryption", "")
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketencryption", httpTraceAll(api.DeleteBucketEncryptionHandler)))).Queries("encryption", "")
// DeleteBucket
bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler)))
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler))))
// PostRestoreObject
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("restoreobject", httpTraceAll(api.PostRestoreObjectHandler)))).Queries("restore", "")
}

/// Root operation

// ListenNotification
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
collectAPIStats("listennotification", httpTraceAll(api.ListenNotificationHandler))).Queries("events", "{events:.*}")

// ListBuckets
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler)))
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))

// S3 browser with signature v4 adds '//' for ListBuckets request, so rather
// than failing with UnknownAPIRequest we simply handle it for now.
apiRouter.Methods(http.MethodGet).Path(SlashSeparator + SlashSeparator).HandlerFunc(
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))

// If none of the routes match add default error handler routes
apiRouter.NotFoundHandler = http.HandlerFunc(collectAPIStats("notfound", httpTraceAll(errorResponseHandler)))
apiRouter.MethodNotAllowedHandler = http.HandlerFunc(collectAPIStats("methodnotallowed", httpTraceAll(errorResponseHandler)))
apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))
apiRouter.MethodNotAllowedHandler = collectAPIStats("methodnotallowed", httpTraceAll(methodNotAllowedHandler("S3")))

}

// corsHandler handler for CORS (Cross Origin Resource Sharing)
func corsHandler(handler http.Handler) http.Handler {
commonS3Headers := []string{
xhttp.Date,
xhttp.ETag,
xhttp.ServerInfo,
xhttp.Connection,
xhttp.AcceptRanges,
xhttp.ContentRange,
xhttp.ContentEncoding,
xhttp.ContentLength,
xhttp.ContentType,
xhttp.ContentDisposition,
xhttp.LastModified,
xhttp.ContentLanguage,
xhttp.CacheControl,
xhttp.RetryAfter,
xhttp.AmzBucketRegion,
xhttp.Expires,
"X-Amz*",
"x-amz*",
"*",
}

return cors.New(cors.Options{
AllowOriginFunc: func(origin string) bool {
for _, allowedOrigin := range globalAPIConfig.getCorsAllowOrigins() {
if wildcard.MatchSimple(allowedOrigin, origin) {
return true
}
}
return false
},
AllowedMethods: []string{
http.MethodGet,
http.MethodPut,
http.MethodHead,
http.MethodPost,
http.MethodDelete,
http.MethodOptions,
http.MethodPatch,
},
AllowedHeaders: commonS3Headers,
ExposedHeaders: commonS3Headers,
AllowCredentials: true,
}).Handler(handler)
}
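For reference, rs/cors wraps any http.Handler; a minimal usage sketch with a hardcoded origin check standing in for the configured wildcard list:

package main

import (
	"fmt"
	"net/http"

	"github.com/rs/cors"
)

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})
	c := cors.New(cors.Options{
		// Stand-in for the wildcard matching above; hypothetical origin.
		AllowOriginFunc:  func(origin string) bool { return origin == "https://console.example.net" },
		AllowedMethods:   []string{http.MethodGet, http.MethodPut, http.MethodHead},
		AllowCredentials: true,
	})
	http.ListenAndServe(":8080", c.Handler(h))
}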

@@ -26,12 +26,15 @@ import (
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"

xhttp "github.com/minio/minio/cmd/http"
xjwt "github.com/minio/minio/cmd/jwt"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/hash"
iampolicy "github.com/minio/minio/pkg/iam/policy"
@@ -118,9 +121,7 @@ func getRequestAuthType(r *http.Request) authType {
return authTypeUnknown
}

// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
// It does not accept presigned or JWT or anonymous requests.
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
func validateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, map[string]interface{}, bool, APIErrorCode) {
var cred auth.Credentials
var owner bool
s3Err := ErrAccessDenied
@@ -129,7 +130,7 @@ func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iamp
// We only support admin credentials to access admin APIs.
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
if s3Err != ErrNone {
return cred, s3Err
return cred, nil, owner, s3Err
}

// we only support V4 (no presign) with auth body
@@ -139,14 +140,24 @@ func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iamp
reqInfo := (&logger.ReqInfo{}).AppendTags("requestHeaders", dumpRequest(r))
ctx := logger.SetReqInfo(ctx, reqInfo)
logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description), logger.Application)
return cred, nil, owner, s3Err
}

var claims map[string]interface{}
claims, s3Err = checkClaimsFromToken(r, cred)
claims, s3Err := checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return cred, nil, owner, s3Err
}

return cred, claims, owner, ErrNone
}

// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
// It does not accept presigned or JWT or anonymous requests.
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, region)
if s3Err != ErrNone {
return cred, s3Err
}

if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.Action(action),
@@ -173,15 +184,13 @@ func getSessionToken(r *http.Request) (token string) {
// Fetch claims in the security token returned by the client; doesn't return
// errors - upon error the returned claims map will be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
claims, _ := getClaimsFromToken(r)
claims, _ := getClaimsFromToken(r, getSessionToken(r))
return claims
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
func getClaimsFromToken(r *http.Request, token string) (map[string]interface{}, error) {
claims := xjwt.NewMapClaims()

token := getSessionToken(r)
if token == "" {
return claims.Map(), nil
}
@@ -204,15 +213,16 @@ func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {

if globalPolicyOPA == nil {
// If OPA is not set and if ldap claim key is set, allow the claim.
if _, ok := claims.Lookup(ldapUser); ok {
if _, ok := claims.MapClaims[ldapUser]; ok {
return claims.Map(), nil
}

// If OPA is not set, the session token must
// have a policy claim and it is mandatory; reject
// requests without a policy claim.
_, pok := claims.Lookup(iamPolicyClaimName())
if !pok {
_, pokOpenID := claims.MapClaims[iamPolicyClaimNameOpenID()]
_, pokSA := claims.MapClaims[iamPolicyClaimNameSA()]
if !pokOpenID && !pokSA {
return nil, errAuthentication
}

@@ -226,7 +236,7 @@ func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
if err != nil {
// If Base64 decoding fails, we should log it to indicate
// that something is malforming the request sent by the client.
logger.LogIf(context.Background(), err, logger.Application)
logger.LogIf(r.Context(), err, logger.Application)
return nil, errAuthentication
}
claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
@@ -241,12 +251,15 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
if token != "" && cred.AccessKey == "" {
return nil, ErrNoAccessKey
}
if cred.IsServiceAccount() && token == "" {
token = cred.SessionToken
}
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
return nil, ErrInvalidToken
}
claims, err := getClaimsFromToken(r)
claims, err := getClaimsFromToken(r, token)
if err != nil {
return nil, toAPIErrorCode(context.Background(), err)
return nil, toAPIErrorCode(r.Context(), err)
}
return claims, ErrNone
}
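checkClaimsFromToken compares the presented token against the credential's session token with crypto/subtle, so the comparison takes the same time whether the first byte or the last byte differs. A minimal sketch:

package main

import (
	"crypto/subtle"
	"fmt"
)

// tokensEqual reports whether two tokens match without leaking, via timing,
// how long their common prefix is.
func tokensEqual(a, b string) bool {
	return subtle.ConstantTimeCompare([]byte(a), []byte(b)) == 1
}

func main() {
	fmt.Println(tokensEqual("session-token", "session-token")) // true
	fmt.Println(tokensEqual("session-token", "forged-token"))  // false
}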
@@ -271,7 +284,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
var cred auth.Credentials
|
||||
switch getRequestAuthType(r) {
|
||||
case authTypeUnknown, authTypeStreamingSigned:
|
||||
return accessKey, owner, ErrAccessDenied
|
||||
return accessKey, owner, ErrSignatureVersionNotSupported
|
||||
case authTypePresignedV2, authTypeSignedV2:
|
||||
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
|
||||
return accessKey, owner, s3Err
|
||||
@@ -320,8 +333,12 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
// Populate payload again to handle it in HTTP handler.
|
||||
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
|
||||
}
|
||||
if cred.AccessKey != "" {
|
||||
logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
|
||||
}
|
||||
|
||||
if cred.AccessKey == "" {
|
||||
if action != policy.ListAllMyBucketsAction && cred.AccessKey == "" {
|
||||
// Anonymous checks are not meant for ListBuckets action
|
||||
if globalPolicySys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: action,
|
||||
@@ -333,8 +350,26 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
// Request is allowed return the appropriate access key.
|
||||
return cred.AccessKey, owner, ErrNone
|
||||
}
|
||||
return accessKey, owner, ErrAccessDenied
|
||||
|
||||
if action == policy.ListBucketVersionsAction {
|
||||
// In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission
|
||||
// verify as a fallback.
|
||||
if globalPolicySys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: policy.ListBucketAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, locationConstraint, "", nil),
|
||||
IsOwner: false,
|
||||
ObjectName: objectName,
|
||||
}) {
|
||||
// Request is allowed return the appropriate access key.
|
||||
return cred.AccessKey, owner, ErrNone
|
||||
}
|
||||
}
|
||||
|
||||
return cred.AccessKey, owner, ErrAccessDenied
|
||||
}
|
||||
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: iampolicy.Action(action),
|
||||
@@ -347,7 +382,25 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
|
||||
// Request is allowed return the appropriate access key.
|
||||
return cred.AccessKey, owner, ErrNone
|
||||
}
|
||||
return accessKey, owner, ErrAccessDenied
|
||||
|
||||
if action == policy.ListBucketVersionsAction {
|
||||
// In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission
|
||||
// verify as a fallback.
|
||||
if globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: iampolicy.ListBucketAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
|
||||
ObjectName: objectName,
|
||||
IsOwner: owner,
|
||||
Claims: claims,
|
||||
}) {
|
||||
// Request is allowed return the appropriate access key.
|
||||
return cred.AccessKey, owner, ErrNone
|
||||
}
|
||||
}
|
||||
|
||||
return cred.AccessKey, owner, ErrAccessDenied
|
||||
}
|
||||

// Verify if request has valid AWS Signature Version '2'.
@@ -410,7 +463,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
if err != nil {
return toAPIErrorCode(ctx, err)
}
-r.Body = ioutil.NopCloser(reader)
+r.Body = reader
return ErrNone
}

@@ -460,18 +513,118 @@ func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
a.handler.ServeHTTP(w, r)
return
}
-writeErrorResponse(context.Background(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
+writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
}

+func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, map[string]interface{}, APIErrorCode) {
+var cred auth.Credentials
+var owner bool
+var s3Err APIErrorCode
+switch atype {
+case authTypeUnknown, authTypeStreamingSigned:
+return cred, owner, nil, ErrSignatureVersionNotSupported
+case authTypeSignedV2, authTypePresignedV2:
+if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
+return cred, owner, nil, s3Err
+}
+cred, owner, s3Err = getReqAccessKeyV2(r)
+case authTypePresigned, authTypeSigned:
+region := globalServerRegion
+if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
+return cred, owner, nil, s3Err
+}
+cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
+}
+if s3Err != ErrNone {
+return cred, owner, nil, s3Err
+}
+
+claims, s3Err := checkClaimsFromToken(r, cred)
+if s3Err != ErrNone {
+return cred, owner, nil, s3Err
+}
+
+return cred, owner, claims, ErrNone
+}
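validateSignature consolidates the V2/V4 authentication dispatch so handlers can share one entry point. A minimal usage sketch under that assumption (w and r are placeholders for the handler's ResponseWriter and Request):

// Sketch of a call site: authenticate first, then reuse cred/claims
// in the policy evaluation that follows.
cred, owner, claims, s3Err := validateSignature(getRequestAuthType(r), r)
if s3Err != ErrNone {
	writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
	return
}
_, _, _ = cred, owner, claims // feed into globalIAMSys.IsAllowed / globalPolicySys.IsAllowed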
+
+func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate time.Time, retMode objectlock.RetMode, byPassSet bool, r *http.Request, cred auth.Credentials, owner bool, claims map[string]interface{}) (s3Err APIErrorCode) {
+var retSet bool
+if cred.AccessKey == "" {
+conditions := getConditionValues(r, "", "", nil)
+conditions["object-lock-mode"] = []string{string(retMode)}
+conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
+if retDays > 0 {
+conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
+}
+if retMode == objectlock.RetGovernance && byPassSet {
+byPassSet = globalPolicySys.IsAllowed(policy.Args{
+AccountName: cred.AccessKey,
+Action: policy.BypassGovernanceRetentionAction,
+BucketName: bucketName,
+ConditionValues: conditions,
+IsOwner: false,
+ObjectName: objectName,
+})
+}
+if globalPolicySys.IsAllowed(policy.Args{
+AccountName: cred.AccessKey,
+Action: policy.PutObjectRetentionAction,
+BucketName: bucketName,
+ConditionValues: conditions,
+IsOwner: false,
+ObjectName: objectName,
+}) {
+retSet = true
+}
+if byPassSet || retSet {
+return ErrNone
+}
+return ErrAccessDenied
+}
+
+conditions := getConditionValues(r, "", cred.AccessKey, claims)
+conditions["object-lock-mode"] = []string{string(retMode)}
+conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
+if retDays > 0 {
+conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
+}
+if retMode == objectlock.RetGovernance && byPassSet {
+byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
+AccountName: cred.AccessKey,
+Action: iampolicy.BypassGovernanceRetentionAction,
+BucketName: bucketName,
+ObjectName: objectName,
+ConditionValues: conditions,
+IsOwner: owner,
+Claims: claims,
+})
+}
+if globalIAMSys.IsAllowed(iampolicy.Args{
+AccountName: cred.AccessKey,
+Action: iampolicy.PutObjectRetentionAction,
+BucketName: bucketName,
+ConditionValues: conditions,
+ObjectName: objectName,
+IsOwner: owner,
+Claims: claims,
+}) {
+retSet = true
+}
+if byPassSet || retSet {
+return ErrNone
+}
+return ErrAccessDenied
+}
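Both the anonymous and the authenticated branch publish the same object-lock condition keys, so bucket and IAM policies can constrain retention changes on them. A short sketch of the map the policy engine sees (the retention values are illustrative; the keys mirror the code above):

// Illustrative condition values for a 30-day governance retention request.
conditions := getConditionValues(r, "", cred.AccessKey, claims)
conditions["object-lock-mode"] = []string{string(objectlock.RetGovernance)}
conditions["object-lock-retain-until-date"] = []string{time.Now().AddDate(0, 0, 30).Format(time.RFC3339)}
conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(30)}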

// isPutActionAllowed - check if PUT operation is allowed on the resource, this
// call verifies bucket policies and IAM policies, supports multi user
// checks etc.
-func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.Request, action iampolicy.Action) (s3Err APIErrorCode) {
+func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action iampolicy.Action) (s3Err APIErrorCode) {
var cred auth.Credentials
var owner bool
switch atype {
case authTypeUnknown:
-return ErrAccessDenied
+return ErrSignatureVersionNotSupported
case authTypeSignedV2, authTypePresignedV2:
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
@@ -487,10 +640,23 @@ func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.R
return s3Err
}

+if cred.AccessKey != "" {
+logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
+}
+
+// Do not check for PutObjectRetentionAction permission,
+// if mode and retain until date are not set.
+// Can happen when bucket has default lock config set
+if action == iampolicy.PutObjectRetentionAction &&
+r.Header.Get(xhttp.AmzObjectLockMode) == "" &&
+r.Header.Get(xhttp.AmzObjectLockRetainUntilDate) == "" {
+return ErrNone
+}
+
if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
-Action: policy.PutObjectAction,
+Action: policy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", "", nil),
IsOwner: false,

@@ -52,7 +52,7 @@ func TestGetRequestAuthType(t *testing.T) {
"X-Amz-Content-Sha256": []string{streamingContentSHA256},
"Content-Encoding": []string{streamingContentEncoding},
},
-Method: "PUT",
+Method: http.MethodPut,
},
authT: authTypeStreamingSigned,
},
@@ -111,7 +111,7 @@ func TestGetRequestAuthType(t *testing.T) {
Header: http.Header{
"Content-Type": []string{"multipart/form-data"},
},
-Method: "POST",
+Method: http.MethodPost,
},
authT: authTypePostPolicy,
},
@@ -212,7 +212,7 @@ func TestIsRequestPresignedSignatureV2(t *testing.T) {
for i, testCase := range testCases {
// creating an input HTTP request.
// Only the query parameters are relevant for this particular test.
-inputReq, err := http.NewRequest("GET", "http://example.com", nil)
+inputReq, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
if err != nil {
t.Fatalf("Error initializing input HTTP request: %v", err)
}
@@ -246,7 +246,7 @@ func TestIsRequestPresignedSignatureV4(t *testing.T) {
for i, testCase := range testCases {
// creating an input HTTP request.
// Only the query parameters are relevant for this particular test.
-inputReq, err := http.NewRequest("GET", "http://example.com", nil)
+inputReq, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
if err != nil {
t.Fatalf("Error initializing input HTTP request: %v", err)
}
@@ -369,15 +369,15 @@ func TestIsReqAuthenticated(t *testing.T) {
s3Error APIErrorCode
}{
// When request is unsigned, access denied is returned.
-{mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
+{mustNewRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
// Empty Content-Md5 header.
-{mustNewSignedEmptyMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
+{mustNewSignedEmptyMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
// Short Content-Md5 header.
-{mustNewSignedShortMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
+{mustNewSignedShortMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
// When request is properly signed, but has bad Content-MD5 header.
-{mustNewSignedBadMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
+{mustNewSignedBadMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
// When request is properly signed, error is none.
-{mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
+{mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrNone},
}

ctx := context.Background()
@@ -391,6 +391,7 @@ func TestIsReqAuthenticated(t *testing.T) {
}
}
}

func TestCheckAdminRequestAuthType(t *testing.T) {
objLayer, fsDir, err := prepareFS()
if err != nil {
@@ -412,11 +413,11 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
Request *http.Request
ErrCode APIErrorCode
}{
-{Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
-{Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
-{Request: mustNewSignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
-{Request: mustNewPresignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
-{Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
+{Request: mustNewRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
+{Request: mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
+{Request: mustNewSignedV2Request(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
+{Request: mustNewPresignedV2Request(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
+{Request: mustNewPresignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
}
ctx := context.Background()
for i, testCase := range testCases {
@@ -425,3 +426,48 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
}
}
}
+
+func TestValidateAdminSignature(t *testing.T) {
+
+ctx := context.Background()
+
+objLayer, fsDir, err := prepareFS()
+if err != nil {
+t.Fatal(err)
+}
+defer os.RemoveAll(fsDir)
+
+if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
+t.Fatalf("unable initialize config file, %s", err)
+}
+
+creds, err := auth.CreateCredentials("admin", "mypassword")
+if err != nil {
+t.Fatalf("unable create credential, %s", err)
+}
+globalActiveCred = creds
+
+testCases := []struct {
+AccessKey string
+SecretKey string
+ErrCode APIErrorCode
+}{
+{"", "", ErrInvalidAccessKeyID},
+{"admin", "", ErrSignatureDoesNotMatch},
+{"admin", "wrongpassword", ErrSignatureDoesNotMatch},
+{"wronguser", "mypassword", ErrInvalidAccessKeyID},
+{"", "mypassword", ErrInvalidAccessKeyID},
+{"admin", "mypassword", ErrNone},
+}
+
+for i, testCase := range testCases {
+req := mustNewRequest(http.MethodGet, "http://localhost:9000/", 0, nil, t)
+if err := signRequestV4(req, testCase.AccessKey, testCase.SecretKey); err != nil {
+t.Fatalf("Unable to inititalized new signed http request %s", err)
+}
+_, _, _, s3Error := validateAdminSignature(ctx, req, globalMinioDefaultRegion)
+if s3Error != testCase.ErrCode {
+t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i+1, testCase.ErrCode, s3Error)
+}
+}
+}

@@ -29,8 +29,10 @@ import (
// path: 'bucket/' or '/bucket/' => Heal bucket
// path: 'bucket/object' => Heal object
type healTask struct {
-path string
-opts madmin.HealOpts
+bucket string
+object string
+versionID string
+opts madmin.HealOpts
// Healing response will be sent here
responseCh chan healResult
}
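With the opaque path split into explicit bucket, object and versionID fields, callers no longer round-trip through path2BucketObject. A minimal queueing sketch (bucket and object names are placeholders; the HealOpts fields follow the madmin package):

respCh := make(chan healResult, 1)
globalBackgroundHealRoutine.queueHealTask(healTask{
	bucket:     "mybucket", // placeholder
	object:     "myobject", // placeholder
	versionID:  "",         // empty selects the latest version
	opts:       madmin.HealOpts{ScanMode: madmin.HealNormalScan},
	responseCh: respCh,
})
res := <-respCh // blocks until the heal routine picks up and finishes the task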
@@ -52,22 +54,39 @@ func (h *healRoutine) queueHealTask(task healTask) {
h.tasks <- task
}

-func waitForLowHTTPReq(tolerance int32) {
+func waitForLowHTTPReq(maxIO int, maxWait time.Duration) {
+// No need to wait run at full speed.
+if maxIO <= 0 {
+return
+}
+
+// At max 10 attempts to wait with 100 millisecond interval before proceeding
+waitCount := 10
+waitTick := 100 * time.Millisecond
+
+// Bucket notification and http trace are not costly, it is okay to ignore them
+// while counting the number of concurrent connections
+maxIOFn := func() int {
+return maxIO + globalHTTPListen.NumSubscribers() + globalHTTPTrace.NumSubscribers()
+}
+
if httpServer := newHTTPServerFn(); httpServer != nil {
-// Wait at max 10 minute for an inprogress request before proceeding to heal
-waitCount := 600
// Any requests in progress, delay the heal.
-for (httpServer.GetRequestCount() >= tolerance) &&
-waitCount > 0 {
+for httpServer.GetRequestCount() >= maxIOFn() {
+time.Sleep(waitTick)
waitCount--
-time.Sleep(1 * time.Second)
+if waitCount == 0 {
+if intDataUpdateTracker.debug {
+logger.Info("waitForLowHTTPReq: waited %d times, resuming", waitCount)
+}
+break
+}
}
}
}

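The tolerance is now computed dynamically: listen and trace subscribers hold long-lived connections, so they are counted on top of maxIO before comparing against the in-flight request total. A worked example of the arithmetic (values are illustrative):

// With maxIO = 16, one listen subscriber and two trace subscribers,
// healing proceeds once fewer than 19 requests are in flight; the loop
// gives up after 10 ticks of 100ms (about one second) regardless.
maxIO, listenSubs, traceSubs := 16, 1, 2
tolerance := maxIO + listenSubs + traceSubs // 19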
// Wait for heal requests and process them
-func (h *healRoutine) run() {
-ctx := context.Background()
+func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
for {
select {
case task, ok := <-h.tasks:
@@ -75,32 +94,29 @@ func (h *healRoutine) run() {
break
}

-// Wait and proceed if there are active requests
-waitForLowHTTPReq(int32(globalEndpoints.Nodes()))
-
var res madmin.HealResultItem
var err error
-bucket, object := path2BucketObject(task.path)
switch {
-case bucket == "" && object == "":
-res, err = bgHealDiskFormat(ctx, task.opts)
-case bucket != "" && object == "":
-res, err = bgHealBucket(ctx, bucket, task.opts)
-case bucket != "" && object != "":
-res, err = bgHealObject(ctx, bucket, object, task.opts)
-}
-if task.responseCh != nil {
-task.responseCh <- healResult{result: res, err: err}
+case task.bucket == nopHeal:
+continue
+case task.bucket == SlashSeparator:
+res, err = healDiskFormat(ctx, objAPI, task.opts)
+case task.bucket != "" && task.object == "":
+res, err = objAPI.HealBucket(ctx, task.bucket, task.opts)
+case task.bucket != "" && task.object != "":
+res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts)
}
+task.responseCh <- healResult{result: res, err: err}

-case <-h.doneCh:
-return
-case <-GlobalServiceDoneCh:
+case <-ctx.Done():
return
}
}
}

-func initHealRoutine() *healRoutine {
+func newHealRoutine() *healRoutine {
return &healRoutine{
tasks: make(chan healTask),
doneCh: make(chan struct{}),
@@ -108,45 +124,10 @@ func initHealRoutine() *healRoutine {

}

-func startBackgroundHealing() {
-ctx := context.Background()
-
-var objAPI ObjectLayer
-for {
-objAPI = newObjectLayerWithoutSafeModeFn()
-if objAPI == nil {
-time.Sleep(time.Second)
-continue
-}
-break
-}
-
-// Run the background healer
-globalBackgroundHealRoutine = initHealRoutine()
-go globalBackgroundHealRoutine.run()
-
-// Launch the background healer sequence to track
-// background healing operations
-info := objAPI.StorageInfo(ctx, false)
-numDisks := info.Backend.OnlineDisks.Sum() + info.Backend.OfflineDisks.Sum()
-nh := newBgHealSequence(numDisks)
-globalBackgroundHealState.LaunchNewHealSequence(nh)
-}
-
-func initBackgroundHealing() {
-go startBackgroundHealing()
-}
-
-// bgHealDiskFormat - heals format.json, return value indicates if a
+// healDiskFormat - heals format.json, return value indicates if a
// failure error occurred.
-func bgHealDiskFormat(ctx context.Context, opts madmin.HealOpts) (madmin.HealResultItem, error) {
-// Get current object layer instance.
-objectAPI := newObjectLayerWithoutSafeModeFn()
-if objectAPI == nil {
-return madmin.HealResultItem{}, errServerNotInitialized
-}
-
-res, err := objectAPI.HealFormat(ctx, opts.DryRun)
+func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpts) (madmin.HealResultItem, error) {
+res, err := objAPI.HealFormat(ctx, opts.DryRun)

// return any error, ignore error returned when disks have
// already healed.
@@ -154,37 +135,5 @@ func bgHealDiskFormat(ctx context.Context, opts madmin.HealOpts) (madmin.HealRes
return madmin.HealResultItem{}, err
}

-// Healing succeeded notify the peers to reload format and re-initialize disks.
-// We will not notify peers if healing is not required.
-if err == nil {
-for _, nerr := range globalNotificationSys.ReloadFormat(opts.DryRun) {
-if nerr.Err != nil {
-logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
-logger.LogIf(ctx, nerr.Err)
-}
-}
-}
-
return res, nil
}
-
-// bghealBucket - traverses and heals given bucket
-func bgHealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
-// Get current object layer instance.
-objectAPI := newObjectLayerWithoutSafeModeFn()
-if objectAPI == nil {
-return madmin.HealResultItem{}, errServerNotInitialized
-}
-
-return objectAPI.HealBucket(ctx, bucket, opts.DryRun, opts.Remove)
-}
-
-// bgHealObject - heal the given object and record result
-func bgHealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
-// Get current object layer instance.
-objectAPI := newObjectLayerWithoutSafeModeFn()
-if objectAPI == nil {
-return madmin.HealResultItem{}, errServerNotInitialized
-}
-return objectAPI.HealObject(ctx, bucket, object, opts.DryRun, opts.Remove, opts.ScanMode)
-}

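The refactor visible across this hunk swaps polling a package global for explicit dependency injection: the caller resolves the ObjectLayer once and hands it to the heal routine together with a context for shutdown. A condensed sketch of the new shape (healLoop is a hypothetical condensation, not a function in this diff):

// healLoop: hypothetical condensation of the pattern above.
func healLoop(ctx context.Context, objAPI ObjectLayer, tasks chan healTask) {
	for {
		select {
		case <-ctx.Done(): // cancellation replaces doneCh/GlobalServiceDoneCh
			return
		case task := <-tasks:
			res, err := objAPI.HealBucket(ctx, task.bucket, task.opts)
			task.responseCh <- healResult{result: res, err: err}
		}
	}
}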
@@ -18,110 +18,198 @@ package cmd

import (
"context"
+"errors"
"fmt"
+"sort"
"time"

+"github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/logger"
)

-const defaultMonitorNewDiskInterval = time.Minute * 10
+const (
+defaultMonitorNewDiskInterval = time.Second * 10
+healingTrackerFilename = ".healing.bin"
+)

-func initLocalDisksAutoHeal() {
-go monitorLocalDisksAndHeal()
+//go:generate msgp -file $GOFILE -unexported
+type healingTracker struct {
+ID string
+
+// future add more tracking capabilities
+}
+
+func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
+z, ok := objAPI.(*erasureServerPools)
+if !ok {
+return
+}
+
+initBackgroundHealing(ctx, objAPI) // start quick background healing
+
+bgSeq := mustGetHealSequence(ctx)
+
+globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)
+
+if drivesToHeal := globalBackgroundHealState.healDriveCount(); drivesToHeal > 0 {
+logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content...",
+drivesToHeal, defaultMonitorNewDiskInterval))
+
+// Heal any disk format and metadata early, if possible.
+// Start with format healing
+if err := bgSeq.healDiskFormat(); err != nil {
+if newObjectLayerFn() != nil {
+// log only in situations, when object layer
+// has fully initialized.
+logger.LogIf(bgSeq.ctx, err)
+}
+}
+}
+
+if err := bgSeq.healDiskMeta(objAPI); err != nil {
+if newObjectLayerFn() != nil {
+// log only in situations, when object layer
+// has fully initialized.
+logger.LogIf(bgSeq.ctx, err)
+}
+}
+
+go monitorLocalDisksAndHeal(ctx, z, bgSeq)
+}
+
+func getLocalDisksToHeal() (disksToHeal Endpoints) {
+for _, ep := range globalEndpoints {
+for _, endpoint := range ep.Endpoints {
+if !endpoint.IsLocal {
+continue
+}
+// Try to connect to the current endpoint
+// and reformat if the current disk is not formatted
+disk, _, err := connectEndpoint(endpoint)
+if errors.Is(err, errUnformattedDisk) {
+disksToHeal = append(disksToHeal, endpoint)
+} else if err == nil && disk != nil && disk.Healing() {
+disksToHeal = append(disksToHeal, disk.Endpoint())
+}
+}
+}
+return disksToHeal
+
+}
+func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
+// Run the background healer
+globalBackgroundHealRoutine = newHealRoutine()
+go globalBackgroundHealRoutine.run(ctx, objAPI)
+
+globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence(), objAPI)
+}

// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible to perform the heal
-func monitorLocalDisksAndHeal() {
-// Wait until the object layer is ready
-var objAPI ObjectLayer
-for {
-objAPI = newObjectLayerWithoutSafeModeFn()
-if objAPI == nil {
-time.Sleep(time.Second)
-continue
-}
-break
-}
-
-z, ok := objAPI.(*xlZones)
-if !ok {
-return
-}
-
-ctx := context.Background()
-
-var bgSeq *healSequence
-var found bool
-
-for {
-bgSeq, found = globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
-if found {
-break
-}
-time.Sleep(time.Second)
-}
-
+func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq *healSequence) {
+// Perform automatic disk healing when a disk is replaced locally.
+wait:
for {
-time.Sleep(defaultMonitorNewDiskInterval)
+select {
+case <-ctx.Done():
+return
+case <-time.After(defaultMonitorNewDiskInterval):
+var erasureSetInZoneDisksToHeal []map[int][]StorageAPI

-// Attempt a heal as the server starts-up first.
-localDisksInZoneHeal := make([]Endpoints, len(z.zones))
-for i, ep := range globalEndpoints {
-localDisksToHeal := Endpoints{}
-for _, endpoint := range ep.Endpoints {
-if !endpoint.IsLocal {
-continue
-}
-// Try to connect to the current endpoint
-// and reformat if the current disk is not formatted
-_, _, err := connectEndpoint(endpoint)
-if err == errUnformattedDisk {
-localDisksToHeal = append(localDisksToHeal, endpoint)
+healDisks := globalBackgroundHealState.getHealLocalDisks()
+if len(healDisks) > 0 {
+// Reformat disks
+bgSeq.sourceCh <- healSource{bucket: SlashSeparator}
+
+// Ensure that reformatting disks is finished
+bgSeq.sourceCh <- healSource{bucket: nopHeal}
+
+logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
+len(healDisks)))
+
+erasureSetInZoneDisksToHeal = make([]map[int][]StorageAPI, len(z.serverPools))
+for i := range z.serverPools {
+erasureSetInZoneDisksToHeal[i] = map[int][]StorageAPI{}
+}
+}
-if len(localDisksToHeal) == 0 {
-continue
-}
-localDisksInZoneHeal[i] = localDisksToHeal
-}
-
-// Reformat disks
-bgSeq.sourceCh <- SlashSeparator
-
-// Ensure that reformatting disks is finished
-bgSeq.sourceCh <- nopHeal
-
-var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
-// Compute the list of erasure set to heal
-for i, localDisksToHeal := range localDisksInZoneHeal {
-var erasureSetToHeal []int
-for _, endpoint := range localDisksToHeal {
-// Load the new format of this passed endpoint
-_, format, err := connectEndpoint(endpoint)
+// heal only if new disks found.
+for _, endpoint := range healDisks {
+disk, format, err := connectEndpoint(endpoint)
if err != nil {
-logger.LogIf(ctx, err)
+printEndpointError(endpoint, err, true)
continue
}

+zoneIdx := globalEndpoints.GetLocalZoneIdx(disk.Endpoint())
+if zoneIdx < 0 {
+continue
+}

// Calculate the set index where the current endpoint belongs
-setIndex, _, err := findDiskIndex(z.zones[i].format, format)
+z.serverPools[zoneIdx].erasureDisksMu.RLock()
+// Protect reading reference format.
+setIndex, _, err := findDiskIndex(z.serverPools[zoneIdx].format, format)
+z.serverPools[zoneIdx].erasureDisksMu.RUnlock()
if err != nil {
-logger.LogIf(ctx, err)
+printEndpointError(endpoint, err, false)
continue
}

-erasureSetToHeal = append(erasureSetToHeal, setIndex)
+erasureSetInZoneDisksToHeal[zoneIdx][setIndex] = append(erasureSetInZoneDisksToHeal[zoneIdx][setIndex], disk)
}
-erasureSetInZoneToHeal[i] = erasureSetToHeal
-}

-// Heal all erasure sets that need
-for i, erasureSetToHeal := range erasureSetInZoneToHeal {
-for _, setIndex := range erasureSetToHeal {
-err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex])
-if err != nil {
-logger.LogIf(ctx, err)
+buckets, _ := z.ListBuckets(ctx)
+
+// Heal latest buckets first.
+sort.Slice(buckets, func(i, j int) bool {
+return buckets[i].Created.After(buckets[j].Created)
+})
+
+for i, setMap := range erasureSetInZoneDisksToHeal {
+for setIndex, disks := range setMap {
+for _, disk := range disks {
+logger.Info("Healing disk '%s' on %s zone", disk, humanize.Ordinal(i+1))
+
+// So someone changed the drives underneath, healing tracker missing.
+if !disk.Healing() {
+logger.Info("Healing tracker missing on '%s', disk was swapped again on %s zone", disk, humanize.Ordinal(i+1))
+diskID, err := disk.GetDiskID()
+if err != nil {
+logger.LogIf(ctx, err)
+// reading format.json failed or not found, proceed to look
+// for new disks to be healed again, we cannot proceed further.
+goto wait
+}
+
+if err := saveHealingTracker(disk, diskID); err != nil {
+logger.LogIf(ctx, err)
+// Unable to write healing tracker, permission denied or some
+// other unexpected error occurred. Proceed to look for new
+// disks to be healed again, we cannot proceed further.
+goto wait
+}
+}
+
+lbDisks := z.serverPools[i].sets[setIndex].getOnlineDisks()
+if err := healErasureSet(ctx, setIndex, buckets, lbDisks); err != nil {
+logger.LogIf(ctx, err)
continue
}

+logger.Info("Healing disk '%s' on %s zone complete", disk, humanize.Ordinal(i+1))
+
+if err := disk.Delete(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix),
+healingTrackerFilename, false); err != nil && !errors.Is(err, errFileNotFound) {
+logger.LogIf(ctx, err)
+continue
+}
+
+// Only upon success pop the healed disk.
+globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
}
}
}
}

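The .healing.bin marker is what makes disk healing restartable across the monitor passes above: it is written when a replaced disk is detected, consulted via disk.Healing() on every pass, and removed only after the erasure set heals. A condensed lifecycle sketch (trackAndHeal is a hypothetical name; the calls mirror the loop above):

// Hypothetical condensation of the tracker lifecycle in monitorLocalDisksAndHeal.
func trackAndHeal(ctx context.Context, disk StorageAPI) error {
	if !disk.Healing() { // tracker missing: the drive was swapped again
		diskID, err := disk.GetDiskID()
		if err != nil {
			return err // cannot read format.json, retry on the next pass
		}
		if err := saveHealingTracker(disk, diskID); err != nil {
			return err
		}
	}
	// ... heal the erasure set this disk belongs to ...
	// Drop the marker only after the heal succeeded.
	return disk.Delete(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix),
		healingTrackerFilename, false)
}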
cmd/background-newdisks-heal-ops_gen.go (new file, 110 lines)
@@ -0,0 +1,110 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
"github.com/tinylib/msgp/msgp"
)

// DecodeMsg implements msgp.Decodable
func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}

// EncodeMsg implements msgp.Encodable
func (z healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
// write "ID"
err = en.Append(0x81, 0xa2, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.ID)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// string "ID"
o = append(o, 0x81, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z healingTracker) Msgsize() (s int) {
s = 1 + 3 + msgp.StringPrefixSize + len(z.ID)
return
}
cmd/background-newdisks-heal-ops_gen_test.go (new file, 123 lines)
@@ -0,0 +1,123 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
"bytes"
"testing"

"github.com/tinylib/msgp/msgp"
)

func TestMarshalUnmarshalhealingTracker(t *testing.T) {
v := healingTracker{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}

left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}

func BenchmarkMarshalMsghealingTracker(b *testing.B) {
v := healingTracker{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}

func BenchmarkAppendMsghealingTracker(b *testing.B) {
v := healingTracker{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}

func BenchmarkUnmarshalhealingTracker(b *testing.B) {
v := healingTracker{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}

func TestEncodeDecodehealingTracker(t *testing.T) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)

m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodehealingTracker Msgsize() is inaccurate")
}

vn := healingTracker{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}

buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}

func BenchmarkEncodehealingTracker(b *testing.B) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}

func BenchmarkDecodehealingTracker(b *testing.B) {
v := healingTracker{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
@@ -28,11 +28,6 @@ import (
humanize "github.com/dustin/go-humanize"
)

-// Prepare XL/FS backend for benchmark.
-func prepareBenchmarkBackend(instanceType string) (ObjectLayer, []string, error) {
-return prepareTestBackend(instanceType)
-}
-
// Benchmark utility functions for ObjectLayer.PutObject().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
@@ -40,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
-err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
+err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -81,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
object := getRandomObjectName()

// create bucket.
-err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
+err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -132,10 +127,12 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
b.StopTimer()
}

-// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
+// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
-// create a temp XL/FS backend.
-objLayer, disks, err := prepareBenchmarkBackend(instanceType)
+// create a temp Erasure/FS backend.
+ctx, cancel := context.WithCancel(context.Background())
+defer cancel()
+objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -146,10 +143,12 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
runPutObjectPartBenchmark(b, objLayer, objSize)
}

-// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
+// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
-// create a temp XL/FS backend.
-objLayer, disks, err := prepareBenchmarkBackend(instanceType)
+// create a temp Erasure/FS backend.
+ctx, cancel := context.WithCancel(context.Background())
+defer cancel()
+objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -160,10 +159,12 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
runPutObjectBenchmark(b, objLayer, objSize)
}

-// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
+// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
-// create a temp XL/FS backend.
-objLayer, disks, err := prepareBenchmarkBackend(instanceType)
+// create a temp Erasure/FS backend.
+ctx, cancel := context.WithCancel(context.Background())
+defer cancel()
+objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -180,7 +181,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
-err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
+err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -189,7 +190,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {

// generate etag for the generated data.
// etag of the data to written is required as input for PutObject.
-// PutObject is the functions which writes the data onto the FS/XL backend.
+// PutObject is the functions which writes the data onto the FS/Erasure backend.

// get text data generated for number of bytes equal to object size.
md5hex := getMD5Hash(textData)
@@ -239,10 +240,12 @@ func generateBytesData(size int) []byte {
return bytes.Repeat(getRandomByte(), size)
}

-// creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
+// creates Erasure/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
-// create a temp XL/FS backend.
-objLayer, disks, err := prepareBenchmarkBackend(instanceType)
+// create a temp Erasure/FS backend.
+ctx, cancel := context.WithCancel(context.Background())
+defer cancel()
+objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -253,10 +256,12 @@ func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
runGetObjectBenchmark(b, objLayer, objSize)
}

-// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
+// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
-// create a temp XL/FS backend.
-objLayer, disks, err := prepareBenchmarkBackend(instanceType)
+// create a temp Erasure/FS backend.
+ctx, cancel := context.WithCancel(context.Background())
+defer cancel()
+objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@@ -273,7 +278,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
-err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
+err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -317,7 +322,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
-err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
+err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -326,7 +331,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to written is required as input for PutObject.
-// PutObject is the functions which writes the data onto the FS/XL backend.
+// PutObject is the functions which writes the data onto the FS/Erasure backend.

md5hex := getMD5Hash([]byte(textData))
sha256hex := ""

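Every benchmark now passes BucketOptions{} where a bare location string used to go, leaving room for per-bucket flags beyond the region. A hedged sketch of the new call (the LockEnabled field is an assumption about the struct of this era, not shown in this diff):

// Sketch: create a bucket with the new options struct.
err := obj.MakeBucketWithLocation(context.Background(), "testbucket", BucketOptions{
	Location:    "us-east-1", // replaces the old bare location argument
	LockEnabled: false,       // assumed field, not visible in this diff
})
if err != nil {
	b.Fatal(err)
}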
@@ -27,6 +27,14 @@ import (
"github.com/minio/minio/cmd/logger"
)

+type errHashMismatch struct {
+message string
+}
+
+func (err *errHashMismatch) Error() string {
+return err.message
+}
+
// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow *io.PipeWriter
@@ -73,7 +81,7 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
totalFileSize = bitrotSumsTotalSize + length
}
-err := disk.CreateFile(volume, filePath, totalFileSize, r)
+err := disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r)
r.CloseWithError(err)
close(bw.canClose)
}()
@@ -111,7 +119,7 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
// For the first ReadAt() call we need to open the stream for reading.
b.currOffset = offset
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
-b.rc, err = b.disk.ReadFileStream(b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
+b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
if err != nil {
return 0, err
}
@@ -132,9 +140,9 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
b.h.Write(buf)

if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
-err = fmt.Errorf("hashes do not match expected %s, got %s",
-hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))
-logger.LogIf(context.Background(), err)
+err := &errHashMismatch{fmt.Sprintf("Disk: %s -> %s/%s - content hash does not match - expected %s, got %s",
+b.disk, b.volume, b.filePath, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
+logger.LogIf(GlobalContext, err)
return 0, err
}
b.currOffset += int64(len(buf))

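Promoting the bitrot failure from a bare fmt.Errorf to the dedicated *errHashMismatch type lets callers separate content corruption from ordinary I/O errors. A sketch of the check a caller could perform (br and buf are placeholders for a streamingBitrotReader and its read buffer):

// Sketch: classify a ReadAt failure by error type.
var hashErr *errHashMismatch
if _, err := br.ReadAt(buf, 0); err != nil {
	if errors.As(err, &hashErr) {
		// verified corruption: schedule healing for this shard
	} else {
		// transport or disk error: retry from another replica
	}
}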
Some files were not shown because too many files have changed in this diff.