Compare commits: RELEASE.20... → RELEASE.20... (888 commits)
(Commit table: 888 commits, listed from 96c0ce1f0c through b37a02cddf. Only the abbreviated SHA column survived extraction; the Author, Date, and message columns are empty in this capture.)
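The comparison can be reproduced locally with plain git. A minimal sketch, assuming a fresh clone; OLD_TAG and NEW_TAG are placeholders for the two truncated RELEASE.20... tag names:

```sh
# Fetch the repository and its release tags.
git clone https://github.com/minio/minio && cd minio
git fetch --tags

# List the commits between the two releases, oldest first
# (OLD_TAG and NEW_TAG are placeholders for the truncated tag names).
git log --oneline --reverse OLD_TAG..NEW_TAG

# Summarize the file-by-file changes, matching the per-file view below.
git diff --stat OLD_TAG..NEW_TAG
```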
.github/ISSUE_TEMPLATE.md (17 changes, vendored)

@@ -1,3 +1,12 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: community, triage
+assignees: ''
+
+---
+
 <!--- Provide a general summary of the issue in the Title above -->
 
 ## Expected Behavior
@@ -15,6 +24,8 @@
 ## Steps to Reproduce (for bugs)
 <!--- Provide a link to a live example, or an unambiguous set of steps to -->
 <!--- reproduce this bug. Include code to reproduce, if relevant -->
+<!--- and make sure you have followed https://github.com/minio/minio/tree/release/docs/debugging to capture relevant logs -->
+
 1.
 2.
 3.
@@ -30,8 +41,6 @@
 
 ## Your Environment
 <!--- Include as many relevant details about the environment you experienced the bug in -->
-* Version used (`minio version`):
-* Environment name and version (e.g. nginx 1.9.1):
-* Server type and version:
+* Version used (`minio --version`):
+* Server setup and configuration:
 * Operating System and version (`uname -a`):
 * Link to your project:
.github/ISSUE_TEMPLATE/bug_report.md (8 changes, vendored)

@@ -24,6 +24,8 @@ assignees: ''
 ## Steps to Reproduce (for bugs)
 <!--- Provide a link to a live example, or an unambiguous set of steps to -->
 <!--- reproduce this bug. Include code to reproduce, if relevant -->
+<!--- and make sure you have followed https://github.com/minio/minio/tree/release/docs/debugging to capture relevant logs -->
+
 1.
 2.
 3.
@@ -39,8 +41,6 @@ assignees: ''
 
 ## Your Environment
 <!--- Include as many relevant details about the environment you experienced the bug in -->
-* Version used (`minio version`):
-* Environment name and version (e.g. nginx 1.9.1):
-* Server type and version:
+* Version used (`minio --version`):
+* Server setup and configuration:
 * Operating System and version (`uname -a`):
 * Link to your project:
.github/ISSUE_TEMPLATE/config.yml (4 changes, vendored)

@@ -2,7 +2,7 @@ blank_issues_enabled: false
 contact_links:
   - name: MinIO Community Support
     url: https://slack.min.io
-    about: Please ask and answer questions here.
+    about: Join here for Community Support
   - name: MinIO SUBNET Support
     url: https://min.io/pricing
-    about: Join this for Enterprise Support.
+    about: Join here for Enterprise Support
.github/PULL_REQUEST_TEMPLATE.md (1 change, vendored)

@@ -16,4 +16,3 @@
 - [ ] Fixes a regression (If yes, please add `commit-id` or `PR #` here)
 - [ ] Documentation needed
 - [ ] Unit tests needed
 - [ ] Functional tests needed (If yes, add [mint](https://github.com/minio/mint) PR # here: )
.github/lock.yml (2 changes, vendored)

@@ -11,7 +11,7 @@ skipCreatedBefore: false
 exemptLabels: []
 
 # Label to add before locking, such as `outdated`. Set to `false` to disable
-lockLabel: false
+lockLabel: true
 
 # Comment to post before locking. Set to `false` to disable
 lockComment: >-
.github/stale.yml (7 changes, vendored)

@@ -1,11 +1,11 @@
 # Configuration for probot-stale - https://github.com/probot/stale
 
 # Number of days of inactivity before an Issue or Pull Request becomes stale
-daysUntilStale: 90
+daysUntilStale: 30
 
 # Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
 # Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
-daysUntilClose: 30
+daysUntilClose: 15
 
 # Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
 onlyLabels: []
@@ -14,6 +14,7 @@ onlyLabels: []
 exemptLabels:
   - "security"
   - "pending discussion"
+  - "do not close"
 
 # Set to true to ignore issues in a project (defaults to false)
 exemptProjects: false
@@ -30,7 +31,7 @@ staleLabel: stale
 # Comment to post when marking as stale. Set to `false` to disable
 markComment: >-
   This issue has been automatically marked as stale because it has not had
-  recent activity. It will be closed after 21 days if no further activity
+  recent activity. It will be closed after 15 days if no further activity
   occurs. Thank you for your contributions.
 # Comment to post when removing the stale label.
 # unmarkComment: >
.github/workflows/go.yml (new file, 54 additions, vendored)

name: Go

on:
  pull_request:
    branches:
      - master

jobs:
  build:
    name: Test on Go ${{ matrix.go-version }} and ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go-version: [1.14.x, 1.15.x]
        os: [ubuntu-latest, windows-latest]
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-node@v1
        with:
          node-version: '12'
      - uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - name: Build on ${{ matrix.os }}
        if: matrix.os == 'windows-latest'
        env:
          CGO_ENABLED: 0
          GO111MODULE: on
          MINIO_CI_CD: 1
        run: |
          go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
          go test -v --timeout 50m ./...
      - name: Build on ${{ matrix.os }}
        if: matrix.os == 'ubuntu-latest'
        env:
          CGO_ENABLED: 0
          GO111MODULE: on
          MINIO_CI_CD: 1
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          sudo apt-get install devscripts shellcheck
          nancy_version=$(curl --retry 10 -Ls -o /dev/null -w "%{url_effective}" https://github.com/sonatype-nexus-community/nancy/releases/latest | sed "s/https:\/\/github.com\/sonatype-nexus-community\/nancy\/releases\/tag\///")
          curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-linux.amd64-${nancy_version} && chmod +x nancy
          go list -m all | ./nancy sleuth
          make
          diff -au <(gofmt -s -d cmd) <(printf "")
          diff -au <(gofmt -s -d pkg) <(printf "")
          make test-race
          make crosscompile
          make verify
          make verify-healing
          cd browser && npm install && npm run test && cd ..
          bash -c 'shopt -s globstar; shellcheck mint/**/*.sh'
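The ubuntu job's dependency audit can be replayed outside CI. A sketch assuming Linux/amd64, using the same release-discovery trick the workflow uses to locate the latest nancy binary:

```sh
# Discover the latest nancy release tag by following the GitHub redirect,
# then download the Linux binary and audit the Go module graph.
nancy_version=$(curl --retry 10 -Ls -o /dev/null -w "%{url_effective}" \
  https://github.com/sonatype-nexus-community/nancy/releases/latest | sed "s|.*/tag/||")
curl -L -o nancy "https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-linux.amd64-${nancy_version}"
chmod +x nancy
go list -m all | ./nancy sleuth   # exits non-zero on known-vulnerable dependencies
```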
.golangci.yml (new file, 32 additions)

linters-settings:
  golint:
    min-confidence: 0

  misspell:
    locale: US

linters:
  disable-all: true
  enable:
    - typecheck
    - goimports
    - misspell
    - govet
    - golint
    - ineffassign
    - gosimple
    - deadcode
    - structcheck

issues:
  exclude-use-default: false
  exclude:
    - should have a package comment
    - error strings should not be capitalized or end with punctuation or a newline

run:
  skip-dirs:
    - pkg/rpc

service:
  golangci-lint-version: 1.20.0 # use the fixed version to not introduce new linters unexpectedly
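To run this configuration locally, golangci-lint is pointed at the file explicitly, using the same install and invocation the Makefile's `lint` target uses (see the Makefile diff further down):

```sh
# Install the pinned golangci-lint release, then lint with the repo config.
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b "$(go env GOPATH)/bin" v1.27.0
golangci-lint run --timeout=5m --config ./.golangci.yml
```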
.goreleaser.yml (new file, 167 additions)

project_name: minio

release:
  name_template: "Version {{.MinIO.Version}}"
  disable: true
  github:
    owner: minio
    name: minio

env:
  - CGO_ENABLED=0
  - GO111MODULE=on

before:
  hooks:
    - make clean
    - go generate ./...
    - go mod tidy
    - go mod download

builds:
  -
    goos:
      - linux
      - darwin
      - windows
      - freebsd

    goarch:
      - amd64
      - arm64
      - arm
      - ppc64le
      - s390x

    goarm:
      - 7

    ignore:
      - goos: darwin
        goarch: arm64
      - goos: darwin
        goarch: arm
      - goos: darwin
        goarch: ppc64le
      - goos: darwin
        goarch: s390x
      - goos: windows
        goarch: arm64
      - goos: windows
        goarch: arm
      - goos: windows
        goarch: ppc64le
      - goos: windows
        goarch: s390x
      - goos: freebsd
        goarch: arm
      - goos: freebsd
        goarch: arm64
      - goos: freebsd
        goarch: ppc64le
      - goos: freebsd
        goarch: s390x

    flags:
      - -tags=kqueue
      - -trimpath

    ldflags:
      - "-s -w -X github.com/minio/minio/cmd.Version={{.Version}} -X github.com/minio/minio/cmd.ReleaseTag={{.Tag}} -X github.com/minio/minio/cmd.CommitID={{.FullCommit}} -X github.com/minio/minio/cmd.ShortCommitID={{.ShortCommit}}"

archives:
  -
    format: binary
    name_template: "{{ .Binary }}-release/{{ .Os }}-{{ .Arch }}/{{ .Binary }}.{{ .Version }}"

nfpms:
  -
    id: minio
    package_name: minio
    vendor: MinIO, Inc.
    homepage: https://min.io/
    maintainer: dev@min.io
    description: MinIO is a High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
    license: Apache 2.0
    bindir: /usr/bin
    formats:
      - deb
      - rpm
    overrides:
      deb:
        file_name_template: "{{ .Binary }}-release/debs/{{ .ProjectName }}-{{ .Version }}_{{ .Arch }}"
        replacements:
          arm: armv7
        files:
          "NOTICE": "/usr/share/minio/NOTICE"
          "CREDITS": "/usr/share/minio/CREDITS"
          "LICENSE": "/usr/share/minio/LICENSE"
          "README.md": "/usr/share/minio/README.md"
      rpm:
        file_name_template: "{{ .Binary }}-release/rpms/{{ .ProjectName }}-{{ .Version }}.{{ .Arch }}"
        replacements:
          amd64: x86_64
          arm64: aarch64
          arm: armv7
        files:
          "NOTICE": "/usr/share/minio/NOTICE"
          "CREDITS": "/usr/share/minio/CREDITS"
          "LICENSE": "/usr/share/minio/LICENSE"
          "README.md": "/usr/share/minio/README.md"

checksum:
  algorithm: sha256

signs:
  -
    signature: "${artifact}.minisig"
    cmd: "sh"
    args:
      - '-c'
      - 'minisign -s /media/${USER}/minio/minisign.key -qQSm ${artifact} < /media/${USER}/minio/minisign-passphrase'
    artifacts: all

changelog:
  sort: asc
  filters:
    exclude:
      - '^Update yaml files'

dockers:
  -
    goos: linux
    goarch: amd64
    dockerfile: Dockerfile.release
    image_templates:
      - minio/minio:{{ .Tag }}
      - minio/minio:latest

  -
    goos: linux
    goarch: ppc64le
    dockerfile: Dockerfile.ppc64le.release
    image_templates:
      - minio/minio:{{ .Tag }}-ppc64le

  -
    goos: linux
    goarch: s390x
    dockerfile: Dockerfile.s390x.release
    image_templates:
      - minio/minio:{{ .Tag }}-s390x

  -
    goos: linux
    goarch: arm64
    goarm: ''
    dockerfile: Dockerfile.arm64.release
    image_templates:
      - minio/minio:{{ .Tag }}-arm64

  -
    goos: linux
    goarch: arm
    goarm: '7'
    dockerfile: Dockerfile.arm.release
    image_templates:
      - minio/minio:{{ .Tag }}-arm
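A local dry run of this release configuration is possible without tagging or publishing; a sketch, assuming a pre-1.0 goreleaser binary is installed (flag names are from that era's CLI):

```sh
# Build all configured platforms into ./dist, skipping git-tag checks
# and publication; nothing is pushed to GitHub or Docker Hub.
goreleaser release --snapshot --skip-publish --rm-dist
```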
.nancy-ignore (new empty file, 0 changes)
.travis.yml (deleted file, 58 deletions)

go_import_path: github.com/minio/minio

language: go

addons:
  apt:
    packages:
      - shellcheck

services:
  - docker

# this ensures PRs based on a local branch are not built twice
# the downside is that a PR targeting a different branch is not built
# but as a workaround you can add the branch to this list
branches:
  only:
    - master

matrix:
  include:
    - os: linux
      dist: trusty
      sudo: required
      env:
        - ARCH=x86_64
        - CGO_ENABLED=0
        - GO111MODULE=on
        - SIMPLE_CI=1
      go: 1.13.x
      script:
        - make
        - diff -au <(gofmt -s -d cmd) <(printf "")
        - diff -au <(gofmt -s -d pkg) <(printf "")
        - make test-race
        - make crosscompile
        - make verify
        - cd browser && npm install && npm run test && cd ..
        - bash -c 'shopt -s globstar; shellcheck mint/**/*.sh'

    - os: windows
      env:
        - ARCH=x86_64
        - CGO_ENABLED=0
        - GO111MODULE=on
        - SIMPLE_CI=1
      go: 1.13.x
      script:
        - go build --ldflags="$(go run buildscripts/gen-ldflags.go)" -o %GOPATH%\bin\minio.exe
        - for d in $(go list ./... | grep -v browser); do go test -v --timeout 20m "$d"; done

before_script:
  # Add an IPv6 config - see the corresponding Travis issue
  # https://github.com/travis-ci/travis-ci/issues/8361
  - if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6'; fi

before_install:
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then nvm install stable ; fi
Dockerfile (16 changes)

@@ -1,4 +1,4 @@
-FROM golang:1.13-alpine
+FROM golang:1.15-alpine as builder
 
 LABEL maintainer="MinIO Inc <dev@min.io>"
 
@@ -9,21 +9,21 @@ ENV GO111MODULE on
 RUN \
     apk add --no-cache git && \
     git clone https://github.com/minio/minio && cd minio && \
-    go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"
+    git checkout master && go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"
 
-FROM alpine:3.10
+FROM alpine:3.12
 
 ENV MINIO_UPDATE off
 ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_SECRET_KEY_FILE=secret_key \
     MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
-    MINIO_SSE_MASTER_KEY_FILE=sse_master_key
+    MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
+    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"
 
 EXPOSE 9000
 
-COPY --from=0 /go/bin/minio /usr/bin/minio
-COPY --from=0 /go/minio/CREDITS /third_party/
-COPY --from=0 /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/
+COPY --from=builder /go/bin/minio /usr/bin/minio
+COPY --from=builder /go/minio/CREDITS /third_party/
+COPY --from=builder /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/
 
 RUN \
     apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
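Building and running an image from the updated Dockerfile follows the usual pattern; the tag below is illustrative:

```sh
# Build the two-stage image, then start a single-node server on port 9000.
docker build -t minio/minio:custom .
docker run -p 9000:9000 minio/minio:custom server /data
```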
(deleted file, 41 deletions; file name not captured in the extraction. The contents are the arm32v7 image build.)

FROM golang:1.13-alpine as builder

WORKDIR /home

ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on

RUN \
    apk add --no-cache git 'curl>7.61.0' && \
    git clone https://github.com/minio/minio && \
    curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .

FROM arm32v7/alpine:3.10

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY CREDITS /third_party/
COPY --from=builder /home/qemu-arm-static /usr/bin/qemu-arm-static

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl https://dl.min.io/server/minio/release/linux-arm/minio > /usr/bin/minio && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh

EXPOSE 9000

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio"]
(deleted file, 41 deletions; file name not captured in the extraction. The contents are the arm64v8 image build.)

FROM golang:1.13-alpine as builder

WORKDIR /home

ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on

RUN \
    apk add --no-cache git 'curl>7.61.0' && \
    git clone https://github.com/minio/minio && \
    curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .

FROM arm64v8/alpine:3.10

LABEL maintainer="MinIO Inc <dev@min.io>"

COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY CREDITS /third_party/
COPY --from=builder /home/qemu-arm-static /usr/bin/qemu-arm-static

ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
    curl https://dl.min.io/server/minio/release/linux-arm64/minio > /usr/bin/minio && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh

EXPOSE 9000

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio"]
Dockerfile.cicd (new file, 36 additions)

FROM golang:1.15-alpine as builder

LABEL maintainer="MinIO Inc <dev@min.io>"

ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on

RUN \
    apk add --no-cache git && \
    git clone https://github.com/minio/minio && cd minio && \
    git checkout master && go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)"

FROM alpine:3.12

ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"

EXPOSE 9000

COPY --from=builder /go/bin/minio /usr/bin/minio
COPY --from=builder /go/minio/CREDITS /third_party/
COPY --from=builder /go/minio/dockerscripts/docker-entrypoint.sh /usr/bin/

RUN \
    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio", "server", "/data"]
(file name not captured in the extraction)

@@ -1,4 +1,4 @@
-FROM alpine:3.10
+FROM alpine:3.12
 
 LABEL maintainer="MinIO Inc <dev@min.io>"
 
Dockerfile.dev.browser (new file, 12 additions)

FROM ubuntu

LABEL maintainer="MinIO Inc <dev@min.io>"

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends --no-install-suggests \
    git golang make npm && \
    apt-get clean && rm -rf /var/lib/apt/lists/*

ENV PATH=$PATH:/root/go/bin

RUN go get github.com/go-bindata/go-bindata/go-bindata && \
    go get github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs
(file name not captured in the extraction; the contents drop a source-build stage in favor of downloading and minisign-verifying the published binary)

@@ -1,32 +1,32 @@
-FROM golang:1.13-alpine
+FROM alpine:3.12
 
-ENV GOPATH /go
-ENV CGO_ENABLED 0
-ENV GO111MODULE on
-
-RUN \
-    apk add --no-cache git && \
-    git clone https://github.com/minio/minio
-
-FROM alpine:3.10
+ARG TARGETARCH
 
 LABEL maintainer="MinIO Inc <dev@min.io>"
 
-COPY dockerscripts/docker-entrypoint.sh /usr/bin/
-COPY CREDITS /third_party/
-
 ENV MINIO_UPDATE off
 ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_SECRET_KEY_FILE=secret_key \
     MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
-    MINIO_SSE_MASTER_KEY_FILE=sse_master_key
+    MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
+    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"
 
+COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
+COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
+COPY CREDITS /licenses/CREDITS
+COPY LICENSE /licenses/LICENSE
 
 RUN \
-    apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
+    echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
+    apk update && apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' minisign && \
     echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
-    curl https://dl.min.io/server/minio/release/linux-amd64/minio > /usr/bin/minio && \
-    chmod +x /usr/bin/minio && \
-    chmod +x /usr/bin/docker-entrypoint.sh
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio -o /usr/bin/minio && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.sha256sum -o /usr/bin/minio.sha256sum && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.minisig -o /usr/bin/minio.minisig && \
+    chmod +x /usr/bin/minio && \
+    chmod +x /usr/bin/docker-entrypoint.sh && \
+    chmod +x /usr/bin/verify-minio.sh && \
+    curl -s -q -O https://raw.githubusercontent.com/minio/minio/master/CREDITS && \
+    /usr/bin/verify-minio.sh
 
 EXPOSE 9000
 
Dockerfile.release.ubi (new file, 49 additions)

FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3

ARG TARGETARCH

LABEL name="MinIO" \
      vendor="MinIO Inc <dev@min.io>" \
      maintainer="MinIO Inc <dev@min.io>" \
      version="RELEASE.2020-11-25T22-36-25Z" \
      release="RELEASE.2020-11-25T22-36-25Z" \
      summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
      description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."

ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
    MINIO_SSE_MASTER_KEY_FILE=sse_master_key \
    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"

COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
COPY dockerscripts/docker-entrypoint.ubi.sh /usr/bin/docker-entrypoint.ubi.sh
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE

RUN \
    microdnf update --nodocs && \
    microdnf install curl ca-certificates shadow-utils --nodocs && \
    curl -s -q https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm -o epel-release.rpm && \
    rpm -ivh epel-release.rpm && microdnf install minisign --nodocs && \
    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio -o /usr/bin/minio && \
    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.sha256sum -o /usr/bin/minio.sha256sum && \
    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.minisig -o /usr/bin/minio.minisig && \
    microdnf clean all && \
    chmod +x /usr/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.ubi.sh && \
    chmod +x /usr/bin/verify-minio.sh && \
    /usr/bin/verify-minio.sh && \
    groupadd --gid 1000 minio && \
    useradd -M --uid 1000 --gid 1000 --home /usr/share/minio minio && \
    mkdir -p /data && chown -R minio:minio /usr/bin /data

EXPOSE 9000

USER minio

ENTRYPOINT ["/usr/bin/docker-entrypoint.ubi.sh"]

VOLUME ["/data"]

CMD ["minio"]
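Because the UBI image keys its binary download on `ARG TARGETARCH`, it is most naturally built with docker buildx, which populates that argument per platform automatically. A sketch, assuming a buildx builder is configured; the tag is illustrative:

```sh
# Cross-build the UBI image for arm64; buildx sets TARGETARCH=arm64 for us,
# and --load imports the single-platform result into the local image store.
docker buildx build --platform linux/arm64 \
  -f Dockerfile.release.ubi -t example/minio:ubi-arm64 --load .
```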
(deleted file, 80 deletions; file name not captured in the extraction. The contents are the old multi-stage CI Dockerfile.)

#-------------------------------------------------------------
# Stage 1: Build and Unit tests
#-------------------------------------------------------------
FROM golang:1.13

COPY . /go/src/github.com/minio/minio
WORKDIR /go/src/github.com/minio/minio

RUN apt-get update && apt-get install -y jq
ENV GO111MODULE=on
ENV SIMPLE_CI 1

RUN git config --global http.cookiefile /gitcookie/.gitcookie

RUN apt-get update && \
    apt-get -y install sudo
RUN touch /etc/sudoers

RUN echo "root ALL=(ALL) ALL" >> /etc/sudoers
RUN echo "ci ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
RUN echo "Defaults env_reset" >> /etc/sudoers
RUN echo 'Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go:/usr/local/go/bin"' >> /etc/sudoers

RUN mkdir -p /home/ci/.cache

RUN groupadd -g 999 ci && \
    useradd -r -u 999 -g ci ci && \
    chown -R ci:ci /go /home/ci && \
    chmod -R a+rw /go

USER ci

# -- tests --
RUN make
RUN bash -c 'diff -au <(gofmt -s -d cmd) <(printf "")'
RUN bash -c 'diff -au <(gofmt -s -d pkg) <(printf "")'
RUN make test-race
RUN make crosscompile
RUN make verify

## -- add healing tests
RUN make verify-healing

#-------------------------------------------------------------
# Stage 2: Test Frontend
#-------------------------------------------------------------
FROM node:10.15-stretch-slim

ENV SIMPLE_CI 1

COPY browser /minio/browser
WORKDIR /minio/browser

RUN yarn
RUN yarn test

#-------------------------------------------------------------
# Stage 3: Run Gateway Tests
#-------------------------------------------------------------
FROM ubuntu:18.04

COPY --from=0 /go/src/github.com/minio/minio/minio /usr/bin/minio
COPY buildscripts/gateway-tests.sh /usr/bin/gateway-tests.sh
COPY mint /mint

ENV DEBIAN_FRONTEND noninteractive
ENV LANG C.UTF-8
ENV GOROOT /usr/local/go
ENV GOPATH /usr/local/gopath
ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH
ENV SIMPLE_CI 1
ENV MINT_ROOT_DIR /mint

RUN apt-get --yes update && apt-get --yes upgrade && \
    apt-get --yes --quiet install wget jq curl git dnsmasq && \
    cd /mint && /mint/release.sh

WORKDIR /mint

RUN /usr/bin/gateway-tests.sh
Makefile (48 changes)

@@ -8,8 +8,6 @@ GOOS := $(shell go env GOOS)
 VERSION ?= $(shell git describe --tags)
 TAG ?= "minio/minio:$(VERSION)"
 
-BUILD_LDFLAGS := '$(LDFLAGS)'
-
 all: build
 
 checks:
@@ -18,22 +16,19 @@ checks:
 getdeps:
 	@mkdir -p ${GOPATH}/bin
-	@which golint 1>/dev/null || (echo "Installing golint" && GO111MODULE=off go get -u golang.org/x/lint/golint)
-ifeq ($(GOARCH),s390x)
-	@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && GO111MODULE=off go get honnef.co/go/tools/cmd/staticcheck)
-else
-	@which staticcheck 1>/dev/null || (echo "Installing staticcheck" && wget --quiet https://github.com/dominikh/go-tools/releases/download/2020.1.3/staticcheck_${GOOS}_${GOARCH}.tar.gz && tar xf staticcheck_${GOOS}_${GOARCH}.tar.gz && mv staticcheck/staticcheck ${GOPATH}/bin/staticcheck && chmod +x ${GOPATH}/bin/staticcheck && rm -f staticcheck_${GOOS}_${GOARCH}.tar.gz && rm -rf staticcheck)
-endif
-	@which misspell 1>/dev/null || (echo "Installing misspell" && GO111MODULE=off go get -u github.com/client9/misspell/cmd/misspell)
+	@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
+	@which ruleguard 1>/dev/null || (echo "Installing ruleguard" && GO111MODULE=off go get github.com/quasilyte/go-ruleguard/...)
 	@which msgp 1>/dev/null || (echo "Installing msgp" && GO111MODULE=off go get github.com/tinylib/msgp)
 	@which stringer 1>/dev/null || (echo "Installing stringer" && GO111MODULE=off go get golang.org/x/tools/cmd/stringer)
 
 crosscompile:
 	@(env bash $(PWD)/buildscripts/cross-compile.sh)
 
-verifiers: getdeps vet fmt lint staticcheck spelling
+verifiers: getdeps fmt lint ruleguard check-gen
 
-vet:
-	@echo "Running $@ check"
-	@GO111MODULE=on go vet github.com/minio/minio/...
+check-gen:
+	@go generate ./... >/dev/null
+	@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)
 
 fmt:
 	@echo "Running $@ check"
@@ -42,21 +37,12 @@ fmt:
 lint:
 	@echo "Running $@ check"
-	@GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd/...
-	@GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg/...
+	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
+	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
 
-staticcheck:
+ruleguard:
 	@echo "Running $@ check"
-	@GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/cmd/...
-	@GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/pkg/...
-
-spelling:
-	@echo "Running $@ check"
-	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find cmd/`
-	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find pkg/`
-	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find docs/`
-	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find buildscripts/`
-	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find dockerscripts/`
+	@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go github.com/minio/minio/...
 
 # Builds minio, runs the verifiers then runs the tests.
 check: test
@@ -71,21 +57,23 @@ test-race: verifiers build
 # Verify minio binary
 verify:
 	@echo "Verifying build with race"
-	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
+	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-build.sh)
 
 # Verify healing of disks with minio binary
 verify-healing:
 	@echo "Verify healing build with race"
-	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
+	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-healing.sh)
 
 # Builds minio locally.
 build: checks
 	@echo "Building minio binary to './minio'"
-	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
+	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 
-docker: build
+docker: checks
 	@echo "Building minio docker image '$(TAG)'"
+	@GOOS=linux GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@docker build -t $(TAG) . -f Dockerfile.dev
 
 # Builds minio and installs it to $GOPATH/bin.
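A typical local loop over the targets above, assuming a working Go toolchain with `$GOPATH/bin` on `PATH`:

```sh
make              # default target: 'build', producing ./minio
make verifiers    # getdeps + fmt + lint + ruleguard + check-gen
make test-race    # unit tests under the race detector
make verify       # race build plus buildscripts/verify-build.sh
```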
71
README.md
71
README.md
@@ -3,26 +3,30 @@
|
||||
|
||||
[](https://min.io)
|
||||
|
||||
MinIO is High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Using MinIO build high performance infrastructure for machine learning, analytics and application data workloads.
|
||||
MinIO is a High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
|
||||
|
||||
## Docker Container
|
||||
### Stable
|
||||
```
|
||||
docker pull minio/minio
|
||||
docker run -p 9000:9000 minio/minio server /data
|
||||
docker run -p 9000:9000 \
|
||||
-e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
|
||||
-e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
|
||||
minio/minio server /data
|
||||
```
|
||||
|
||||
### Edge
|
||||
```
|
||||
docker pull minio/minio:edge
|
||||
docker run -p 9000:9000 minio/minio:edge server /data
|
||||
docker run -p 9000:9000 \
|
||||
-e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
|
||||
-e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
|
||||
minio/minio:edge server /data
|
||||
```
|
||||
|
||||
> NOTE: Docker will not display the default keys unless you start the container with the `-it`(interactive TTY) argument. Generally, it is not recommended to use default keys with containers. Please visit MinIO Docker quickstart guide for more information [here](https://docs.min.io/docs/minio-docker-quickstart-guide)
|
||||
|
||||
## macOS
|
||||
### Homebrew (recommended)
|
||||
Install minio packages using [Homebrew](http://brew.sh/)
|
||||
Install minio packages using [Homebrew](https://brew.sh/)
|
||||
```sh
|
||||
brew install minio/stable/minio
|
||||
minio server /data
|
||||
@@ -84,7 +88,7 @@ service minio start
|
||||
```
|
||||
|
||||
## Install from Source
|
||||
Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.13](https://golang.org/dl/#stable)
|
||||
Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.15](https://golang.org/dl/#stable)
|
||||
|
||||
```sh
|
||||
GO111MODULE=on go get github.com/minio/minio
|
||||
@@ -94,23 +98,6 @@ GO111MODULE=on go get github.com/minio/minio
|
||||
|
||||
By default MinIO uses the port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to the port.
|
||||
|
||||
### iptables
|
||||
|
||||
For hosts with iptables enabled (RHEL, CentOS, etc), you can use `iptables` command to enable all traffic coming to specific ports. Use below command to allow
|
||||
access to port 9000
|
||||
|
||||
```sh
|
||||
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
|
||||
service iptables restart
|
||||
```
|
||||
|
||||
Below command enables all incoming traffic to ports ranging from 9000 to 9010.
|
||||
|
||||
```sh
|
||||
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
|
||||
service iptables restart
|
||||
```
|
||||
|
||||
### ufw
|
||||
|
||||
For hosts with ufw enabled (Debian based distros), you can use `ufw` command to allow traffic to specific ports. Use below command to allow access to port 9000
|
||||
@@ -145,6 +132,23 @@ Note that `permanent` makes sure the rules are persistent across firewall start,
|
||||
firewall-cmd --reload
|
||||
```
|
||||
|
||||
### iptables
|
||||
|
||||
For hosts with iptables enabled (RHEL, CentOS, etc), you can use `iptables` command to enable all traffic coming to specific ports. Use below command to allow
|
||||
access to port 9000
|
||||
|
||||
```sh
|
||||
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
|
||||
service iptables restart
|
||||
```
|
||||
|
||||
Below command enables all incoming traffic to ports ranging from 9000 to 9010.
|
||||
|
||||
```sh
|
||||
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
|
||||
service iptables restart
|
||||
```
|
||||
|
||||
## Test using MinIO Browser
|
||||
MinIO Server comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 ensure your server has started successfully.

@@ -159,20 +163,23 @@ When deployed on a single drive, MinIO server lets clients access any pre-existi

The above statement is also valid for all gateway backends.

## Upgrading MinIO

MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster. This allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend all our users use [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update) from the client. This will update all the nodes in the cluster and restart them, as shown in the following command from the MinIO client (mc):
MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster. This allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend all our users use [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update) from the client. This will update all the nodes in the cluster simultaneously and restart them, as shown in the following command from the MinIO client (mc):

```
mc admin update <minio alias, e.g., myminio>
```

**Important things to remember during upgrades**:
> NOTE: some releases might not allow rolling upgrades; this is always called out in the release notes, so it is generally advised to read the release notes before upgrading. In such a situation `mc admin update` is the recommended mechanism to upgrade all servers at once.

### Important things to remember during MinIO upgrades

- `mc admin update` will only work if the user running MinIO has write access to the parent directory where the binary is located; for example, if the current binary is at `/usr/local/bin/minio`, you need write access to `/usr/local/bin` (a quick pre-flight check is sketched after this list).
- In the case of federated setups, `mc admin update` should be run against each cluster individually. Avoid updating `mc` until all clusters have been updated.
- If you are updating the server, it is always recommended (unless explicitly mentioned in the MinIO server release notes) to update `mc` with `mc update` once all the servers have been upgraded.
- `mc admin update` is disabled in docker/container environments; container environments provide their own mechanisms for updating running containers.
- If you are using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
- If you are using etcd with MinIO for federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
- `mc admin update` updates and restarts all servers simultaneously; applications will retry and continue their respective operations once the upgrade is done.
- `mc admin update` is disabled in kubernetes/container environments; container environments provide their own mechanisms to roll out updates.
- In the case of federated setups, `mc admin update` should be run against each cluster individually. Avoid updating `mc` to any new release until all clusters have been successfully updated.
- If using `kes` as KMS with MinIO, just replace the binary and restart `kes`; more information about `kes` can be found [here](https://github.com/minio/kes/wiki).
- If using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
- If using etcd with MinIO for federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
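
As a pre-flight for the write-access requirement in the first point, you can verify that the user running MinIO can write to the binary's directory before kicking off the update. A minimal sketch:

```sh
# Locate the running binary and test write access to its parent directory
BIN_DIR="$(dirname "$(command -v minio)")"
[ -w "$BIN_DIR" ] && echo "ok: can update binary in $BIN_DIR" \
                  || echo "error: no write access to $BIN_DIR"
```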

## Explore Further

- [MinIO Erasure Code QuickStart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)

@@ -186,4 +193,4 @@ mc admin update <minio alias, e.g., myminio>

Please follow the MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)

## License

[](https://app.fossa.io/projects/git%2Bgithub.com%2Fminio%2Fminio?ref=badge_large)
Use of MinIO is governed by the Apache 2.0 License found at [LICENSE](https://github.com/minio/minio/blob/master/LICENSE).
README_zh_CN.md (116 lines changed)
@@ -7,29 +7,33 @@ MinIO is a very lightweight service and can easily be combined with other applications

## Docker Container

### Stable

```
docker pull minio/minio
docker run -p 9000:9000 minio/minio server /data
docker run -p 9000:9000 \
  -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
  -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
  minio/minio server /data
```

### Edge

```
docker pull minio/minio:edge
docker run -p 9000:9000 minio/minio:edge server /data
docker run -p 9000:9000 \
  -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \
  -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
  minio/minio:edge server /data
```

For more information on Docker deployments, see [here](https://docs.min.io/docs/minio-docker-quickstart-guide)

> Tip: Docker will not display the default keys unless you start the container with the `-it` (interactive TTY) flag. Using the container's default keys is generally not recommended. For more information on Docker deployments, see [here](https://docs.min.io/docs/minio-docker-quickstart-guide)

## macOS

### Homebrew
### Homebrew (recommended)

Install minio using [Homebrew](http://brew.sh/):

```sh
brew install minio/stable/minio
minio server /data
```

#### Note
If you previously installed minio with `brew install minio`, reinstall it from the official `minio/stable/minio` tap. The homebrew version is unstable because of a bug in golang 1.8.

> Tip: If you previously installed minio with `brew install minio`, reinstall it from the official `minio/stable/minio` tap. The homebrew version is unstable because of a bug in golang 1.8.

```sh
brew uninstall minio
brew install minio/stable/minio
```

@@ -49,6 +53,16 @@ chmod 755 minio

| Operating System | CPU Architecture | URL |
| ---------- | -------- | ------ |
| GNU/Linux | 64-bit Intel | https://dl.min.io/server/minio/release/linux-amd64/minio |

```sh
wget https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
./minio server /data
```

| Operating System | CPU Architecture | URL |
| ---------- | -------- | ------ |
| GNU/Linux | ppc64le | https://dl.min.io/server/minio/release/linux-ppc64le/minio |

```sh
wget https://dl.min.io/server/minio/release/linux-ppc64le/minio
chmod +x minio
./minio server /data
```

@@ -64,7 +78,7 @@ minio.exe server D:\Photos

## FreeBSD

### Port
Install using [pkg](https://github.com/freebsd/pkg).
Install using [pkg](https://github.com/freebsd/pkg). MinIO does not officially provide FreeBSD binaries; they are maintained upstream by FreeBSD, see [here](https://www.freshports.org/www/minio).

```sh
pkg install minio
@@ -75,14 +89,68 @@ service minio start

## Install from Source

Source installation is intended only for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install).
Source installation is intended only for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). The minimum Golang version required is [go1.14](https://golang.org/dl/#stable).

```sh
go get -u github.com/minio/minio
GO111MODULE=on go get github.com/minio/minio
```

## Allow Port Access Through Firewalls

By default MinIO uses port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to it.

### ufw

For hosts with ufw enabled (Debian-based distros), use the `ufw` command to allow traffic to specific ports. Use the command below to allow access to port 9000:

```sh
ufw allow 9000
```

The command below allows all incoming traffic on ports 9000 through 9010:

```sh
ufw allow 9000:9010/tcp
```
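
To verify the rules are active, `ufw status` lists them; a minimal check:

```sh
# Show active ufw rules; confirm 9000 and the 9000:9010/tcp range are allowed
ufw status numbered
```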

### firewall-cmd

For hosts with firewall-cmd enabled (CentOS), use the `firewall-cmd` command to allow traffic to specific ports. First, query the active zones:

```sh
firewall-cmd --get-active-zones
```

This command returns the zones currently in use. You can now apply the port rule to the zone returned above. For example, if the zone is `public`, use the following command:

```sh
firewall-cmd --zone=public --add-port=9000/tcp --permanent
```

The `permanent` flag persists the rule across firewall starts, restarts, and reloads. Finally, reload the firewall for the change to take effect:

```sh
firewall-cmd --reload
```
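
After the reload you can confirm the port is open in the zone:

```sh
# Lists the ports currently opened in the public zone; 9000/tcp should appear
firewall-cmd --zone=public --list-ports
```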

### iptables

For hosts with iptables enabled (RHEL, CentOS, etc.), use the `iptables` command to allow all traffic to specific ports. Use the command below to allow access to port 9000:

```sh
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
service iptables restart
```

The command below allows all incoming traffic on ports 9000 through 9010:

```sh
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
service iptables restart
```

## Test Using MinIO Browser

After installation, point your browser to [http://127.0.0.1:9000](http://127.0.0.1:9000); if you can access it, minio was installed successfully.
MinIO Server comes with an embedded web object browser. After installation, point your browser to [http://127.0.0.1:9000](http://127.0.0.1:9000); if you can access it, minio was installed successfully.

@@ -94,6 +162,25 @@ go get -u github.com/minio/minio

The above statement is also valid for all gateway backends.

## Upgrading MinIO

MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster. This allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend all users upgrade with the [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update) command from the client. This updates all nodes in the cluster simultaneously and restarts them, as shown below:

```
mc admin update <minio alias, e.g., myminio>
```

> Note: some releases may not allow rolling upgrades; this is usually mentioned in the release notes, so it is recommended to read the release notes before upgrading. In that case, use the `mc admin update` mechanism to upgrade all servers at once.

### Important things to remember during MinIO upgrades

- `mc admin update` only works if the user running MinIO has write permission to the parent directory where the binary is located; for example, if the current binary is at `/usr/local/bin/minio`, you need write permission to `/usr/local/bin`.
- `mc admin update` updates and restarts all servers at the same time; applications will retry and continue their operations after the upgrade.
- `mc admin update` is not available in kubernetes/container environments; container environments provide their own update mechanisms.
- For federated deployments, run `mc admin update` against each cluster individually. Do not update `mc` to any new release until all clusters have been updated successfully.
- If you use `kes` as KMS with MinIO, just replace the binary and restart `kes`; more information about `kes` can be found [here](https://github.com/minio/kes/wiki).
- If you use Vault as KMS with MinIO, make sure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
- If you use etcd with MinIO for federation, make sure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md

## Explore Further

- [MinIO Erasure Code QuickStart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
- [`mc` Quickstart Guide](https://docs.min.io/docs/minio-client-quickstart-guide)

@@ -104,3 +191,6 @@ go get -u github.com/minio/minio

## Contribute to the MinIO Project

Please refer to the [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md). Chinese developers are welcome to join the MinIO project.

## License

Use of MinIO is governed by the Apache 2.0 License found at [LICENSE](./LICENSE).

@@ -2,6 +2,7 @@

``MinIO Browser`` provides a minimal set of UI to manage buckets and objects on a ``minio`` server. ``MinIO Browser`` is written in JavaScript and released under the [Apache 2.0 License](./LICENSE).

## Installation

### Install node

@@ -11,6 +12,11 @@ exec -l $SHELL
nvm install stable
```

### Install node dependencies

```sh
npm install
```

### Install `go-bindata` and `go-bindata-assetfs`

If you do not have a working Golang environment, please follow [Install Golang](https://golang.org/doc/install)
@@ -30,13 +36,16 @@ npm run release

This generates `ui-assets.go` in the current directory. Now run `make` in the parent directory to build the minio binary with the newly generated ``ui-assets.go``.
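
Put together, the asset rebuild loop looks like the sketch below (run from the repository's `browser/` directory):

```sh
# Regenerate the embedded UI assets, then rebuild the server with them
npm run release   # writes ui-assets.go in the current directory
cd ..             # back to the repository root
make              # rebuilds the minio binary, embedding the new ui-assets.go
```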

## Run MinIO Browser with live reload

### Run MinIO Browser with live reload

```sh
npm run dev
```

Open [http://localhost:8080/minio/](http://localhost:8080/minio/) in your browser to play with the application
Open [http://localhost:8080/minio/](http://localhost:8080/minio/) in your browser to play with the application.

### Run MinIO Browser with live reload on custom port

@@ -54,7 +63,7 @@ index 3ccdaba..9496c56 100644
+    port: 8888,
     proxy: {
       '/minio/webrpc': {
        target: 'http://localhost:9000',
         target: 'http://localhost:9000',

@@ -97,7 +98,7 @@ var exports = {
 if (process.env.NODE_ENV === 'dev') {
   exports.entry = [

@@ -70,4 +79,102 @@ index 3ccdaba..9496c56 100644
npm run dev
```

Open [http://localhost:8888/minio/](http://localhost:8888/minio/) in your browser to play with the application
Open [http://localhost:8888/minio/](http://localhost:8888/minio/) in your browser to play with the application.

### Run MinIO Browser with live reload on any IP

Edit `browser/webpack.config.js`

```diff
diff --git a/browser/webpack.config.js b/browser/webpack.config.js
index 8bdbba53..139f6049 100644
--- a/browser/webpack.config.js
+++ b/browser/webpack.config.js
@@ -71,6 +71,7 @@ var exports = {
     historyApiFallback: {
       index: '/minio/'
     },
+    host: '0.0.0.0',
     proxy: {
       '/minio/webrpc': {
         target: 'http://localhost:9000',
```

```sh
npm run dev
```

Open [http://IP:8080/minio/](http://IP:8080/minio/) in your browser to play with the application.

## Run tests

```sh
npm run test
```
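
To iterate on a single suite, you can forward a filename pattern to the underlying test runner through npm; assuming the `test` script invokes jest, a sketch:

```sh
# Runs only the test files whose path matches the given pattern
npm run test -- ObjectsSearch
```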

## Docker development environment

This approach downloads the sources to your machine so that you can use your IDE or editor of choice. A Docker container is used to provide a controlled build environment without touching your host system.

### Prepare host system

Install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) and [Docker](https://docs.docker.com/get-docker/).

### Development within container

Prepare and build the container:

```sh
git clone git@github.com:minio/minio.git
cd minio
docker build -t minio-dev -f Dockerfile.dev.browser .
```

Run the container, then build and run the core:

```sh
docker run -it --rm --name minio-dev -v "$PWD":/minio minio-dev

cd /minio/browser
npm install
npm run release
cd /minio
make
./minio server /data
```

Note the `Endpoint` IP (the one which is _not_ `127.0.0.1`), `AccessKey`, and `SecretKey` (both default to `minioadmin`) in order to enter them in the browser later.

Open another terminal and connect to the container:

```sh
docker exec -it minio-dev bash
```

Apply the patch to allow access from outside the container:

```sh
cd /minio
git apply --ignore-whitespace <<EOF
diff --git a/browser/webpack.config.js b/browser/webpack.config.js
index 8bdbba53..139f6049 100644
--- a/browser/webpack.config.js
+++ b/browser/webpack.config.js
@@ -71,6 +71,7 @@ var exports = {
     historyApiFallback: {
       index: '/minio/'
     },
+    host: '0.0.0.0',
     proxy: {
       '/minio/webrpc': {
         target: 'http://localhost:9000',
EOF
```

Build and run the frontend with auto-reload:

```sh
cd /minio/browser
npm install
npm run dev
```

Open [http://IP:8080/minio/](http://IP:8080/minio/) in your browser to play with the application.

@@ -57,22 +57,6 @@ export class BrowserDropdown extends React.Component {
    const { fetchServerInfo } = this.props
    fetchServerInfo()
  }
  fullScreen(e) {
    e.preventDefault()
    let el = document.documentElement
    if (el.requestFullscreen) {
      el.requestFullscreen()
    }
    if (el.mozRequestFullScreen) {
      el.mozRequestFullScreen()
    }
    if (el.webkitRequestFullscreen) {
      el.webkitRequestFullscreen()
    }
    if (el.msRequestFullscreen) {
      el.msRequestFullscreen()
    }
  }
  logout(e) {
    e.preventDefault()
    web.Logout()
@@ -87,24 +71,30 @@ export class BrowserDropdown extends React.Component {
          <i className="fas fa-bars" />
        </Dropdown.Toggle>
        <Dropdown.Menu className="dropdown-menu-right">
          <li>
            <a href="" onClick={this.showChangePassword.bind(this)}>
              Change Password <i className="fas fa-cog" />
            </a>
            {this.state.showChangePasswordModal && (
              <ChangePasswordModal
                serverInfo={serverInfo}
                hideChangePassword={this.hideChangePassword.bind(this)}
              />
            )}
          </li>
          <li>
            <a target="_blank" href="https://docs.min.io/?ref=ob">
              Documentation <i className="fas fa-book" />
            </a>
          </li>
          <li>
            <a target="_blank" href="https://github.com/minio/minio">
              GitHub <i className="fab fa-github" />
            </a>
          </li>
          <li>
            <a href="" onClick={this.fullScreen}>
              Fullscreen <i className="fas fa-expand" />
            </a>
          </li>
          <li>
            <a target="_blank" href="https://docs.min.io/">
              Documentation <i className="fas fa-book" />
            </a>
          </li>
          <li>
            <a target="_blank" href="https://slack.min.io">
              Ask for help <i className="fas fa-question-circle" />
            <a target="_blank" href="https://min.io/pricing?ref=ob">
              Get Support <i className="fas fa-question-circle" />
            </a>
          </li>
          <li>
@@ -118,20 +108,9 @@ export class BrowserDropdown extends React.Component {
              />
            )}
          </li>
          <li>
            <a href="" onClick={this.showChangePassword.bind(this)}>
              Change Password <i className="fas fa-cog" />
            </a>
            {this.state.showChangePasswordModal && (
              <ChangePasswordModal
                serverInfo={serverInfo}
                hideChangePassword={this.hideChangePassword.bind(this)}
              />
            )}
          </li>
          <li>
            <a href="" id="logout" onClick={this.logout}>
              Sign Out <i className="fas fa-sign-out-alt" />
              Logout <i className="fas fa-sign-out-alt" />
            </a>
          </li>
        </Dropdown.Menu>
@@ -15,6 +15,7 @@
 */

import React from "react"
import ObjectsSearch from "../objects/ObjectsSearch"
import Path from "../objects/Path"
import StorageInfo from "./StorageInfo"
import BrowserDropdown from "./BrowserDropdown"
@@ -27,6 +28,7 @@ export const Header = () => {
    <header className="fe-header">
      <Path />
      {loggedIn && <StorageInfo />}
      {loggedIn && <ObjectsSearch />}
      <ul className="feh-actions">
        {loggedIn ? (
          <BrowserDropdown />
@@ -26,13 +26,10 @@ export class StorageInfo extends React.Component {
  }
  render() {
    const { used } = this.props.storageInfo

    if (!used) {
    if (!used || used == 0) {
      return <noscript />
    }

    const totalUsed = used.reduce((v1, v2) => v1 + v2, 0)

    return (
      <div className="feh-used">
        <div className="fehu-chart">
@@ -41,7 +38,7 @@ export class StorageInfo extends React.Component {
          <ul>
            <li>
              <span>Used: </span>
              {humanize.filesize(totalUsed)}
              {humanize.filesize(used)}
            </li>
          </ul>
        </div>
@@ -21,7 +21,7 @@ import { StorageInfo } from "../StorageInfo"
describe("StorageInfo", () => {
  it("should render without crashing", () => {
    shallow(
      <StorageInfo storageInfo={{ used: [60] }} fetchStorageInfo={jest.fn()} />
      <StorageInfo storageInfo={ {used: 60} } fetchStorageInfo={jest.fn()} />
    )
  })

@@ -29,7 +29,7 @@ describe("StorageInfo", () => {
    const fetchStorageInfo = jest.fn()
    shallow(
      <StorageInfo
        storageInfo={{ used: [60] }}
        storageInfo={ {used: 60} }
        fetchStorageInfo={fetchStorageInfo}
      />
    )
@@ -40,7 +40,7 @@ describe("StorageInfo", () => {
    const fetchStorageInfo = jest.fn()
    const wrapper = shallow(
      <StorageInfo
        storageInfo={{ used: null }}
        storageInfo={ {used: 0} }
        fetchStorageInfo={fetchStorageInfo}
      />
    )

@@ -20,7 +20,9 @@ import * as actionsCommon from "../actions"

jest.mock("../../web", () => ({
  StorageInfo: jest.fn(() => {
    return Promise.resolve({ storageInfo: { Used: [60] } })
    return Promise.resolve({
      used: 60
    })
  }),
  ServerInfo: jest.fn(() => {
    return Promise.resolve({
@@ -39,7 +41,7 @@ describe("Common actions", () => {
  it("creates common/SET_STORAGE_INFO after fetching the storage details ", () => {
    const store = mockStore()
    const expectedActions = [
      { type: "common/SET_STORAGE_INFO", storageInfo: { used: [60] } }
      { type: "common/SET_STORAGE_INFO", storageInfo: { used: 60 } }
    ]
    return store.dispatch(actionsCommon.fetchStorageInfo()).then(() => {
      const actions = store.getActions()
@@ -21,11 +21,7 @@ describe("common reducer", () => {
  it("should return the initial state", () => {
    expect(reducer(undefined, {})).toEqual({
      sidebarOpen: false,
      storageInfo: {
        total: [0],
        free: [0],
        used: [0]
      },
      storageInfo: {used: 0},
      serverInfo: {}
    })
  })
@@ -62,11 +58,11 @@ describe("common reducer", () => {
        {},
        {
          type: actionsCommon.SET_STORAGE_INFO,
          storageInfo: { total: [100], free: [40] }
          storageInfo: { }
        }
      )
    ).toEqual({
      storageInfo: { total: [100], free: [40] }
      storageInfo: { }
    })
  })
@@ -33,8 +33,7 @@ export const fetchStorageInfo = () => {
  return function(dispatch) {
    return web.StorageInfo().then(res => {
      const storageInfo = {
        total: res.storageInfo.Total,
        used: res.storageInfo.Used
        used: res.used
      }
      dispatch(setStorageInfo(storageInfo))
    })
@@ -19,7 +19,7 @@ import * as actionsCommon from "./actions"
export default (
  state = {
    sidebarOpen: false,
    storageInfo: { total: [0], free: [0], used: [0] },
    storageInfo: {used: 0},
    serverInfo: {}
  },
  action
browser/app/js/browser/selectors.js (new file, 24 lines)
@@ -0,0 +1,24 @@
/*
 * MinIO Cloud Storage (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import { createSelector } from "reselect"

export const getServerInfo = state => state.browser.serverInfo

export const hasServerPublicDomain = createSelector(
  getServerInfo,
  serverInfo => Boolean(serverInfo.info && serverInfo.info.domains && serverInfo.info.domains.length),
)
@@ -31,19 +31,38 @@ export const SET_POLICIES = "buckets/SET_POLICIES"

export const fetchBuckets = () => {
  return function(dispatch) {
    const { bucket, prefix } = pathSlice(history.location.pathname)
    return web.ListBuckets().then(res => {
      const buckets = res.buckets ? res.buckets.map(bucket => bucket.name) : []
      dispatch(setList(buckets))
      if (buckets.length > 0) {
        const { bucket, prefix } = pathSlice(history.location.pathname)
        dispatch(setList(buckets))
        if (bucket && buckets.indexOf(bucket) > -1) {
          dispatch(selectBucket(bucket, prefix))
        } else {
          dispatch(selectBucket(buckets[0]))
        }
      } else {
        dispatch(selectBucket(""))
        history.replace("/")
        if (bucket) {
          dispatch(setList([bucket]))
          dispatch(selectBucket(bucket, prefix))
        } else {
          dispatch(selectBucket(""))
          history.replace("/")
        }
      }
    })
    .catch(err => {
      if (bucket && err.message === "Access Denied." || err.message.indexOf('Prefix access is denied') > -1 ) {
        dispatch(setList([bucket]))
        dispatch(selectBucket(bucket, prefix))
      } else {
        dispatch(
          alertActions.set({
            type: "danger",
            message: err.message,
            autoClear: true,
          })
        )
      }
    })
}
@@ -22,7 +22,8 @@ const bucketsFilterSelector = state => state.buckets.filter
export const getFilteredBuckets = createSelector(
  bucketsSelector,
  bucketsFilterSelector,
  (buckets, filter) => buckets.filter(bucket => bucket.indexOf(filter) > -1)
  (buckets, filter) => buckets.filter(
    bucket => bucket.toLowerCase().indexOf(filter.toLowerCase()) > -1)
)

export const getCurrentBucket = state => state.buckets.currentBucket
@@ -47,6 +47,11 @@ export class ObjectActions extends React.Component {
      SHARE_OBJECT_EXPIRY_MINUTES
    )
  }
  handleDownload(e) {
    e.preventDefault()
    const { object, downloadObject } = this.props
    downloadObject(object.name)
  }
  deleteObject() {
    const { object, deleteObject } = this.props
    deleteObject(object.name)
@@ -82,6 +87,7 @@ export class ObjectActions extends React.Component {
        <a
          href=""
          className="fiad-action"
          title="Share"
          onClick={this.shareObject.bind(this)}
        >
          <i className="fas fa-share-alt" />
@@ -90,6 +96,7 @@ export class ObjectActions extends React.Component {
        <a
          href=""
          className="fiad-action"
          title="Preview"
          onClick={this.showPreviewModal.bind(this)}
        >
          <i className="far fa-file-image" />
@@ -98,6 +105,15 @@ export class ObjectActions extends React.Component {
        <a
          href=""
          className="fiad-action"
          title="Download"
          onClick={this.handleDownload.bind(this)}
        >
          <i className="fas fa-cloud-download-alt" />
        </a>
        <a
          href=""
          className="fiad-action"
          title="Delete"
          onClick={this.showDeleteConfirmModal.bind(this)}
        >
          <i className="fas fa-trash-alt" />
@@ -134,6 +150,7 @@ const mapStateToProps = (state, ownProps) => {

const mapDispatchToProps = (dispatch) => {
  return {
    downloadObject: object => dispatch(objectsActions.downloadObject(object)),
    shareObject: (object, days, hours, minutes) =>
      dispatch(objectsActions.shareObject(object, days, hours, minutes)),
    deleteObject: (object) => dispatch(objectsActions.deleteObject(object)),
@@ -54,8 +54,11 @@ export const ObjectItem = ({
      href={getDataType(name, contentType) === "folder" ? name : "#"}
      onClick={e => {
        e.preventDefault()
        // onClick handler is passed only when we have a prefix
        if (onClick) {
          onClick()
        } else {
          checked ? uncheckObject(name) : checkObject(name)
        }
      }}
    >
@@ -18,6 +18,7 @@ import React from "react"
import { connect } from "react-redux"
import InfiniteScroll from "react-infinite-scroller"
import ObjectsList from "./ObjectsList"
import { getFilteredObjects } from "./selectors"

export class ObjectsListContainer extends React.Component {
  constructor(props) {
@@ -39,22 +40,29 @@ export class ObjectsListContainer extends React.Component {
      })
    }
  }
  componentDidUpdate(prevProps) {
    if (this.props.filter !== prevProps.filter) {
      this.setState({
        page: 1
      })
    }
  }
  loadNextPage() {
    this.setState(state => {
      return { page: state.page + 1 }
    })
  }
  render() {
    const { objects, listLoading } = this.props
    const { filteredObjects, listLoading } = this.props

    const visibleObjects = objects.slice(0, this.state.page * 100)
    const visibleObjects = filteredObjects.slice(0, this.state.page * 100)

    return (
      <div style={{ position: "relative" }}>
        <InfiniteScroll
          pageStart={0}
          loadMore={this.loadNextPage}
          hasMore={objects.length > visibleObjects.length}
          hasMore={filteredObjects.length > visibleObjects.length}
          useWindow={true}
          initialLoad={false}
        >
@@ -70,7 +78,8 @@ const mapStateToProps = state => {
  return {
    currentBucket: state.buckets.currentBucket,
    currentPrefix: state.objects.currentPrefix,
    objects: state.objects.list,
    filteredObjects: getFilteredObjects(state),
    filter: state.objects.filter,
    sortBy: state.objects.sortBy,
    sortOrder: state.objects.sortOrder,
    listLoading: state.objects.listLoading
browser/app/js/objects/ObjectsSearch.js (new file, 43 lines)
@@ -0,0 +1,43 @@
/*
 * MinIO Cloud Storage (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import React from "react"
import { connect } from "react-redux"
import * as actionsObjects from "./actions"

export const ObjectsSearch = ({ onChange }) => (
  <div
    className="input-group ig-left ig-search-dark"
    style={{ display: "block" }}
  >
    <input
      className="ig-text"
      type="input"
      placeholder="Search Objects..."
      onChange={e => onChange(e.target.value)}
    />
    <i className="ig-helpers" />
  </div>
)

const mapDispatchToProps = dispatch => {
  return {
    onChange: filter =>
      dispatch(actionsObjects.setFilter(filter))
  }
}

export default connect(undefined, mapDispatchToProps)(ObjectsSearch)
browser/app/js/objects/PrefixActions.js (new file, 95 lines)
@@ -0,0 +1,95 @@
/*
 * MinIO Cloud Storage (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import React from "react"
import { connect } from "react-redux"
import { Dropdown } from "react-bootstrap"
import DeleteObjectConfirmModal from "./DeleteObjectConfirmModal"
import * as actions from "./actions"

export class PrefixActions extends React.Component {
  constructor(props) {
    super(props)
    this.state = {
      showDeleteConfirmation: false,
    }
  }
  handleDownload(e) {
    e.preventDefault()
    const { object, downloadPrefix } = this.props
    downloadPrefix(object.name)
  }
  deleteObject() {
    const { object, deleteObject } = this.props
    deleteObject(object.name)
  }
  showDeleteConfirmModal(e) {
    e.preventDefault()
    this.setState({ showDeleteConfirmation: true })
  }
  hideDeleteConfirmModal() {
    this.setState({
      showDeleteConfirmation: false,
    })
  }
  render() {
    const { object, showShareObjectModal, shareObjectName } = this.props
    return (
      <Dropdown id={`obj-actions-${object.name}`}>
        <Dropdown.Toggle noCaret className="fia-toggle" />
        <Dropdown.Menu>
          <a
            href=""
            className="fiad-action"
            title="Download as zip"
            onClick={this.handleDownload.bind(this)}
          >
            <i className="fas fa-cloud-download-alt" />
          </a>
          <a
            href=""
            className="fiad-action"
            title="Delete"
            onClick={this.showDeleteConfirmModal.bind(this)}
          >
            <i className="fas fa-trash-alt" />
          </a>
        </Dropdown.Menu>
        {this.state.showDeleteConfirmation && (
          <DeleteObjectConfirmModal
            deleteObject={this.deleteObject.bind(this)}
            hideDeleteConfirmModal={this.hideDeleteConfirmModal.bind(this)}
          />
        )}
      </Dropdown>
    )
  }
}

const mapStateToProps = (state, ownProps) => {
  return {
    object: ownProps.object,
  }
}

const mapDispatchToProps = (dispatch) => {
  return {
    downloadPrefix: object => dispatch(actions.downloadPrefix(object)),
    deleteObject: (object) => dispatch(actions.deleteObject(object)),
  }
}

export default connect(mapStateToProps, mapDispatchToProps)(PrefixActions)
@@ -17,22 +17,32 @@
import React from "react"
import { connect } from "react-redux"
import ObjectItem from "./ObjectItem"
import PrefixActions from "./PrefixActions"
import * as actionsObjects from "./actions"
import { getCheckedList } from "./selectors"

export const PrefixContainer = ({ object, currentPrefix, selectPrefix }) => {
export const PrefixContainer = ({
  object,
  currentPrefix,
  checkedObjectsCount,
  selectPrefix
}) => {
  const props = {
    name: object.name,
    contentType: object.contentType,
    onClick: () => selectPrefix(`${currentPrefix}${object.name}`)
  }

  if (checkedObjectsCount == 0) {
    props.actionButtons = <PrefixActions object={object} />
  }
  return <ObjectItem {...props} />
}

const mapStateToProps = (state, ownProps) => {
  return {
    object: ownProps.object,
    currentPrefix: state.objects.currentPrefix
    currentPrefix: state.objects.currentPrefix,
    checkedObjectsCount: getCheckedList(state).length
  }
}
@@ -23,7 +23,10 @@ class PreviewObjectModal extends React.Component {
    this.state = {
      url: "",
    }
    props.getObjectURL(props.object.name, (url) => {
  }

  componentDidMount() {
    this.props.getObjectURL(this.props.object.name, (url) => {
      this.setState({
        url: url,
      })
@@ -43,11 +46,11 @@ class PreviewObjectModal extends React.Component {
        <ModalBody>
          <div className="input-group">
            {this.state.url && (
              <img
                alt="Image broken"
                src={this.state.url}
                style={{ display: "block", width: "100%" }}
              />
              <object data={this.state.url} style={{ display: "block", width: "100%" }}>
                <h3 style={{ textAlign: "center", display: "block", width: "100%" }}>
                  Do not have read permissions to preview "{this.props.object.name}"
                </h3>
              </object>
            )}
          </div>
        </ModalBody>
@@ -77,7 +77,8 @@ export class ShareObjectModal extends React.Component {
    hideShareObject()
  }
  render() {
    const { shareObjectDetails, shareObject, hideShareObject } = this.props
    const { shareObjectDetails, hideShareObject } = this.props
    const url = `${window.location.protocol}//${shareObjectDetails.url}`
    return (
      <Modal
        show={true}
@@ -93,11 +94,12 @@ export class ShareObjectModal extends React.Component {
            type="text"
            ref={node => (this.copyTextInput = node)}
            readOnly="readOnly"
            value={window.location.protocol + "//" + shareObjectDetails.url}
            value={url}
            onClick={() => this.copyTextInput.select()}
          />
        </div>
        <div
        {shareObjectDetails.showExpiryDate && (
          <div
            className="input-group"
            style={{ display: web.LoggedIn() ? "block" : "none" }}
          >
@@ -174,10 +176,11 @@ export class ShareObjectModal extends React.Component {
            </div>
          </div>
        </div>
        )}
      </ModalBody>
      <div className="modal-footer">
        <CopyToClipboard
          text={window.location.protocol + "//" + shareObjectDetails.url}
          text={url}
          onCopy={this.onUrlCopied.bind(this)}
        >
          <button className="btn btn-success">Copy Link</button>
@@ -67,6 +67,20 @@ describe("ObjectActions", () => {
  })

  it("should call downloadObject when single object is selected and download button is clicked", () => {
    const downloadObject = jest.fn()
    const wrapper = shallow(
      <ObjectActions
        object={{ name: "obj1" }}
        currentPrefix={"pre1/"}
        downloadObject={downloadObject} />
    )
    wrapper
      .find("a")
      .at(1)
      .simulate("click", { preventDefault: jest.fn() })
    expect(downloadObject).toHaveBeenCalled()
  })

  it("should show PreviewObjectModal when preview action is clicked", () => {
@@ -106,7 +120,7 @@ describe("ObjectActions", () => {
    )
    expect(wrapper
      .find("a")
      .length).toBe(2) // find only the other 2
      .length).toBe(3) // find only the other 3
  })

  it("should call shareObject with object and expiry", () => {
@@ -30,7 +30,10 @@ describe("ObjectItem", () => {

  it("shouldn't call onClick when the object is clicked", () => {
    const onClick = jest.fn()
    const wrapper = shallow(<ObjectItem name={"test"} />)
    const checkObject = jest.fn()
    const wrapper = shallow(
      <ObjectItem name={"test"} checkObject={checkObject} />
    )
    wrapper.find("a").simulate("click", { preventDefault: jest.fn() })
    expect(onClick).not.toHaveBeenCalled()
  })
@@ -57,9 +60,15 @@ describe("ObjectItem", () => {
  })

  it("should call uncheckObject when the object/prefix is unchecked", () => {
    const checkObject = jest.fn()
    const uncheckObject = jest.fn()
    const wrapper = shallow(
      <ObjectItem name={"test"} checked={true} uncheckObject={uncheckObject} />
      <ObjectItem
        name={"test"}
        checked={true}
        checkObject={checkObject}
        uncheckObject={uncheckObject}
      />
    )
    wrapper.find("input[type='checkbox']").simulate("change")
    expect(uncheckObject).toHaveBeenCalledWith("test")
@@ -20,13 +20,13 @@ import { ObjectsListContainer } from "../ObjectsListContainer"

describe("ObjectsList", () => {
  it("should render without crashing", () => {
    shallow(<ObjectsListContainer objects={[]} />)
    shallow(<ObjectsListContainer filteredObjects={[]} />)
  })

  it("should render ObjectsList with objects", () => {
    const wrapper = shallow(
      <ObjectsListContainer
        objects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
        filteredObjects={[{ name: "test1.jpg" }, { name: "test2.jpg" }]}
      />
    )
    expect(wrapper.find("ObjectsList").length).toBe(1)
@@ -40,7 +40,7 @@ describe("ObjectsList", () => {
    const wrapper = shallow(
      <ObjectsListContainer
        currentBucket="test1"
        objects={[]}
        filteredObjects={[]}
        listLoading={true}
      />
    )
browser/app/js/objects/__tests__/ObjectsSearch.test.js (new file, 32 lines)
@@ -0,0 +1,32 @@
/*
 * MinIO Cloud Storage (C) 2018 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import React from "react"
import { shallow } from "enzyme"
import { ObjectsSearch } from "../ObjectsSearch"

describe("ObjectsSearch", () => {
  it("should render without crashing", () => {
    shallow(<ObjectsSearch />)
  })

  it("should call onChange with search text", () => {
    const onChange = jest.fn()
    const wrapper = shallow(<ObjectsSearch onChange={onChange} />)
    wrapper.find("input").simulate("change", { target: { value: "test" } })
    expect(onChange).toHaveBeenCalledWith("test")
  })
})
browser/app/js/objects/__tests__/PrefixActions.test.js (new file, 84 lines)
@@ -0,0 +1,84 @@
/*
 * MinIO Cloud Storage (C) 2018 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import React from "react"
import { shallow } from "enzyme"
import { PrefixActions } from "../PrefixActions"

describe("PrefixActions", () => {
  it("should render without crashing", () => {
    shallow(<PrefixActions object={{ name: "abc/" }} currentPrefix={"pre1/"} />)
  })

  it("should show DeleteObjectConfirmModal when delete action is clicked", () => {
    const wrapper = shallow(
      <PrefixActions object={{ name: "abc/" }} currentPrefix={"pre1/"} />
    )
    wrapper
      .find("a")
      .last()
      .simulate("click", { preventDefault: jest.fn() })
    expect(wrapper.state("showDeleteConfirmation")).toBeTruthy()
    expect(wrapper.find("DeleteObjectConfirmModal").length).toBe(1)
  })

  it("should hide DeleteObjectConfirmModal when Cancel button is clicked", () => {
    const wrapper = shallow(
      <PrefixActions object={{ name: "abc/" }} currentPrefix={"pre1/"} />
    )
    wrapper
      .find("a")
      .last()
      .simulate("click", { preventDefault: jest.fn() })
    wrapper.find("DeleteObjectConfirmModal").prop("hideDeleteConfirmModal")()
    wrapper.update()
    expect(wrapper.state("showDeleteConfirmation")).toBeFalsy()
    expect(wrapper.find("DeleteObjectConfirmModal").length).toBe(0)
  })

  it("should call deleteObject with object name", () => {
    const deleteObject = jest.fn()
    const wrapper = shallow(
      <PrefixActions
        object={{ name: "abc/" }}
        currentPrefix={"pre1/"}
        deleteObject={deleteObject}
      />
    )
    wrapper
      .find("a")
      .last()
      .simulate("click", { preventDefault: jest.fn() })
    wrapper.find("DeleteObjectConfirmModal").prop("deleteObject")()
    expect(deleteObject).toHaveBeenCalledWith("abc/")
  })

  it("should call downloadPrefix when single object is selected and download button is clicked", () => {
    const downloadPrefix = jest.fn()
    const wrapper = shallow(
      <PrefixActions
        object={{ name: "abc/" }}
        currentPrefix={"pre1/"}
        downloadPrefix={downloadPrefix} />
    )
    wrapper
      .find("a")
      .first()
      .simulate("click", { preventDefault: jest.fn() })
    expect(downloadPrefix).toHaveBeenCalled()
  })
})
@@ -41,4 +41,22 @@ describe("PrefixContainer", () => {
    wrapper.find("Connect(ObjectItem)").prop("onClick")()
    expect(selectPrefix).toHaveBeenCalledWith("xyz/abc/")
  })

  it("should pass actions to ObjectItem", () => {
    const wrapper = shallow(
      <PrefixContainer object={{ name: "abc/" }} checkedObjectsCount={0} />
    )
    expect(wrapper.find("Connect(ObjectItem)").prop("actionButtons")).not.toBe(
      undefined
    )
  })

  it("should pass empty actions to ObjectItem when checkedObjectsCount is more than 0", () => {
    const wrapper = shallow(
      <PrefixContainer object={{ name: "abc/" }} checkedObjectsCount={1} />
    )
    expect(wrapper.find("Connect(ObjectItem)").prop("actionButtons")).toBe(
      undefined
    )
  })
})
@@ -34,7 +34,7 @@ describe("ShareObjectModal", () => {
    shallow(
      <ShareObjectModal
        object={{ name: "obj1" }}
        shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
        shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
      />
    )
  })
@@ -44,7 +44,7 @@ describe("ShareObjectModal", () => {
    const wrapper = shallow(
      <ShareObjectModal
        object={{ name: "obj1" }}
        shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
        shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
        hideShareObject={hideShareObject}
      />
    )
@@ -59,7 +59,7 @@ describe("ShareObjectModal", () => {
    const wrapper = shallow(
      <ShareObjectModal
        object={{ name: "obj1" }}
        shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
        shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
      />
    )
    expect(
@@ -76,7 +76,7 @@ describe("ShareObjectModal", () => {
    const wrapper = shallow(
      <ShareObjectModal
        object={{ name: "obj1" }}
        shareObjectDetails={{ show: true, object: "obj1", url: "test" }}
        shareObjectDetails={{ show: true, object: "obj1", url: "test", showExpiryDate: true }}
        hideShareObject={hideShareObject}
        showCopyAlert={showCopyAlert}
      />
@@ -89,8 +89,15 @@ describe("ShareObjectModal", () => {
  describe("Update expiry values", () => {
    const props = {
      object: { name: "obj1" },
      shareObjectDetails: { show: true, object: "obj1", url: "test" }
      shareObjectDetails: { show: true, object: "obj1", url: "test", showExpiryDate: true }
    }

    it("should not show expiry values if shared with public link", () => {
      const shareObjectDetails = { show: true, object: "obj1", url: "test", showExpiryDate: false }
      const wrapper = shallow(<ShareObjectModal {...props} shareObjectDetails={shareObjectDetails} />)
      expect(wrapper.find('.set-expire').exists()).toEqual(false)
    })

    it("should have default expiry values", () => {
      const wrapper = shallow(<ShareObjectModal {...props} />)
      expect(wrapper.state("expiry")).toEqual({
@@ -34,6 +34,7 @@ jest.mock("../../web", () => ({
    .mockReturnValueOnce(false)
    .mockReturnValueOnce(true)
    .mockReturnValueOnce(true)
    .mockReturnValueOnce(true)
    .mockReturnValueOnce(false),
  ListObjects: jest.fn(({ bucketName }) => {
    if (bucketName === "test-deny") {
@@ -70,6 +71,16 @@ jest.mock("../../web", () => ({
    .mockImplementationOnce(() => {
      return Promise.resolve({ token: "test" })
    })
    .mockImplementationOnce(() => {
      return Promise.resolve({ token: "test" })
    }),
  GetBucketPolicy: jest.fn(({ bucketName, prefix }) => {
    if (!bucketName) {
      return Promise.reject({ message: "Invalid bucket" })
    }
    if (bucketName === 'test-public') return Promise.resolve({ policy: 'readonly' })
    return Promise.resolve({})
  })
}))

const middlewares = [thunk]
@@ -295,7 +306,8 @@ describe("Objects actions", () => {
        type: "objects/SET_SHARE_OBJECT",
        show: true,
        object: "b.txt",
        url: "test"
        url: "test",
        showExpiryDate: true
      }
    ]
    store.dispatch(actionsObjects.showShareObject("b.txt", "test"))
@@ -321,14 +333,16 @@ describe("Objects actions", () => {
  it("creates objects/SET_SHARE_OBJECT when object is shared", () => {
    const store = mockStore({
      buckets: { currentBucket: "bk1" },
      objects: { currentPrefix: "pre1/" }
      objects: { currentPrefix: "pre1/" },
      browser: { serverInfo: {} },
    })
    const expectedActions = [
      {
        type: "objects/SET_SHARE_OBJECT",
        show: true,
        object: "a.txt",
        url: "https://test.com/bk1/pre1/b.txt"
        url: "https://test.com/bk1/pre1/b.txt",
        showExpiryDate: true
      },
      {
        type: "alert/SET",
@@ -347,10 +361,42 @@ describe("Objects actions", () => {
    })
  })

  it("creates objects/SET_SHARE_OBJECT when object is shared with public link", () => {
    const store = mockStore({
      buckets: { currentBucket: "test-public" },
      objects: { currentPrefix: "pre1/" },
      browser: { serverInfo: { info: { domains: ['public.com'] }} },
    })
    const expectedActions = [
      {
        type: "objects/SET_SHARE_OBJECT",
        show: true,
        object: "a.txt",
        url: "public.com/test-public/pre1/a.txt",
        showExpiryDate: false
      },
      {
        type: "alert/SET",
        alert: {
          type: "success",
          message: "Object shared.",
          id: alertActions.alertId
        }
      }
    ]
    return store
      .dispatch(actionsObjects.shareObject("a.txt", 1, 0, 0))
      .then(() => {
        const actions = store.getActions()
        expect(actions).toEqual(expectedActions)
      })
  })

  it("creates alert/SET when shareObject is failed", () => {
    const store = mockStore({
      buckets: { currentBucket: "" },
      objects: { currentPrefix: "pre1/" }
      objects: { currentPrefix: "pre1/" },
      browser: { serverInfo: {} },
    })
    const expectedActions = [
      {
@@ -442,6 +488,34 @@ describe("Objects actions", () => {
    })
  })

  it("should download prefix", () => {
    const open = jest.fn()
    const send = jest.fn()
    const xhrMockClass = () => ({
      open: open,
      send: send
    })
    window.XMLHttpRequest = jest.fn().mockImplementation(xhrMockClass)

    const store = mockStore({
      buckets: { currentBucket: "bk1" },
      objects: { currentPrefix: "pre1/" }
    })
    return store.dispatch(actionsObjects.downloadPrefix("pre2/")).then(() => {
      const requestUrl = `${
        location.origin
      }${minioBrowserPrefix}/zip?token=test`
      expect(open).toHaveBeenCalledWith("POST", requestUrl, true)
      expect(send).toHaveBeenCalledWith(
        JSON.stringify({
          bucketName: "bk1",
          prefix: "pre1/",
          objects: ["pre2/"]
        })
      )
    })
  })

  it("creates objects/CHECKED_LIST_ADD action", () => {
    const store = mockStore()
    const expectedActions = [
@@ -23,6 +23,7 @@ describe("objects reducer", () => {
  const initialState = reducer(undefined, {})
  expect(initialState).toEqual({
    list: [],
    filter: "",
    listLoading: false,
    sortBy: "",
    sortOrder: SORT_ORDER_ASC,
@@ -24,7 +24,6 @@ import {
import { getCurrentBucket } from "../buckets/selectors"
import { getCurrentPrefix, getCheckedList } from "./selectors"
import * as alertActions from "../alert/actions"
import * as bucketActions from "../buckets/actions"
import {
  minioBrowserPrefix,
  SORT_BY_NAME,
@@ -33,9 +32,11 @@ import {
  SORT_ORDER_ASC,
  SORT_ORDER_DESC,
} from "../constants"
import { getServerInfo, hasServerPublicDomain } from '../browser/selectors'

export const SET_LIST = "objects/SET_LIST"
export const RESET_LIST = "objects/RESET_LIST"
export const SET_FILTER = "objects/SET_FILTER"
export const APPEND_LIST = "objects/APPEND_LIST"
export const REMOVE = "objects/REMOVE"
export const SET_SORT_BY = "objects/SET_SORT_BY"
@@ -57,6 +58,13 @@ export const resetList = () => ({
  type: RESET_LIST,
})

export const setFilter = filter => {
  return {
    type: SET_FILTER,
    filter
  }
}

export const setListLoading = (listLoading) => ({
  type: SET_LIST_LOADING,
  listLoading,
@@ -222,19 +230,38 @@ export const deleteCheckedObjects = () => {

export const shareObject = (object, days, hours, minutes) => {
  return function (dispatch, getState) {
    const hasServerDomain = hasServerPublicDomain(getState())
    const currentBucket = getCurrentBucket(getState())
    const currentPrefix = getCurrentPrefix(getState())
    const objectName = `${currentPrefix}${object}`
    const expiry = days * 24 * 60 * 60 + hours * 60 * 60 + minutes * 60
    if (web.LoggedIn()) {
      return web
        .PresignedGet({
          host: location.host,
          bucket: currentBucket,
          object: objectName,
          expiry: expiry,
        .GetBucketPolicy({ bucketName: currentBucket, prefix: currentPrefix })
        .catch(() => ({ policy: null }))
        .then(({ policy }) => {
          if (hasServerDomain && ['readonly', 'readwrite'].includes(policy)) {
            const domain = getServerInfo(getState()).info.domains[0]
            const url = `${domain}/${currentBucket}/${encodeURI(objectName)}`
            dispatch(showShareObject(object, url, false))
            dispatch(
              alertActions.set({
                type: "success",
                message: "Object shared."
              })
            )
          } else {
            return web
              .PresignedGet({
                host: location.host,
                bucket: currentBucket,
                object: objectName,
                expiry: expiry
              })
          }
        })
        .then((obj) => {
          if (!obj) return
          dispatch(showShareObject(object, obj.url))
          dispatch(
            alertActions.set({
@@ -272,11 +299,12 @@ export const shareObject = (object, days, hours, minutes) => {
  }
}

export const showShareObject = (object, url) => ({
export const showShareObject = (object, url, showExpiryDate = true) => ({
  type: SET_SHARE_OBJECT,
  show: true,
  object,
  url,
  showExpiryDate,
})

export const hideShareObject = (object, url) => ({
@@ -340,6 +368,19 @@ export const downloadObject = (object) => {
  }
}

export const downloadPrefix = (object) => {
  return function (dispatch, getState) {
    return downloadObjects(
      getCurrentBucket(getState()),
      getCurrentPrefix(getState()),
      [object],
      `${object.slice(0, -1)}.zip`,
      dispatch
    )
  }
}


export const checkObject = (object) => ({
  type: CHECKED_LIST_ADD,
  object,
@@ -356,21 +397,28 @@ export const resetCheckedList = () => ({

export const downloadCheckedObjects = () => {
  return function (dispatch, getState) {
    const state = getState()
    return downloadObjects(
      getCurrentBucket(getState()),
      getCurrentPrefix(getState()),
      getCheckedList(getState()),
      null,
      dispatch
    )
  }
}

const downloadObjects = (bucketName, prefix, objects, filename, dispatch) => {
  const req = {
    bucketName: getCurrentBucket(state),
    prefix: getCurrentPrefix(state),
    objects: getCheckedList(state),
    bucketName: bucketName,
    prefix: prefix,
    objects: objects,
  }
  if (!web.LoggedIn()) {
    const requestUrl = location.origin + "/minio/zip?token="
    downloadZip(requestUrl, req, dispatch)
  } else {
  if (web.LoggedIn()) {
    return web
      .CreateURLToken()
      .then((res) => {
        const requestUrl = `${location.origin}${minioBrowserPrefix}/zip?token=${res.token}`
        downloadZip(requestUrl, req, dispatch)
        downloadZip(requestUrl, req, filename, dispatch)
      })
      .catch((err) =>
        dispatch(

@@ -380,11 +428,13 @@ export const downloadCheckedObjects = () => {
        })
      )
    )
  } else {
    const requestUrl = `${location.origin}${minioBrowserPrefix}/zip?token=`
    downloadZip(requestUrl, req, filename, dispatch)
  }
  }
}

const downloadZip = (url, req, dispatch) => {
const downloadZip = (url, req, filename, dispatch) => {
  var anchor = document.createElement("a")
  document.body.appendChild(anchor)

@@ -402,7 +452,7 @@ const downloadZip = (url, req, dispatch) => {
  var separator = req.prefix.length > 1 ? "-" : ""

  anchor.href = blobUrl
  anchor.download =
  anchor.download = filename ||
    req.bucketName + separator + req.prefix.slice(0, -1) + ".zip"

  anchor.click()
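With the extra filename parameter, downloadZip only falls back to the bucket-plus-prefix name when the caller passes null, so prefix downloads keep the name chosen in downloadPrefix. A sketch of the fallback, assuming a non-empty prefix ending in '/':

// filename wins when provided; otherwise "<bucket>-<prefix>.zip"
// (the separator collapses to "" for an empty or root prefix).
const zipDownloadName = (filename, req) => {
  const separator = req.prefix.length > 1 ? "-" : ""
  return filename || req.bucketName + separator + req.prefix.slice(0, -1) + ".zip"
}
console.log(zipDownloadName(null, { bucketName: "media", prefix: "photos/" })) // "media-photos.zip"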
@@ -28,6 +28,7 @@ const removeObject = (list, objectToRemove, lookup) => {
export default (
  state = {
    list: [],
    filter: "",
    listLoading: false,
    sortBy: "",
    sortOrder: SORT_ORDER_ASC,

@@ -53,6 +54,11 @@ export default (
        ...state,
        list: []
      }
    case actionsObjects.SET_FILTER:
      return {
        ...state,
        filter: action.filter
      }
    case actionsObjects.SET_LIST_LOADING:
      return {
        ...state,

@@ -89,7 +95,8 @@ export default (
        shareObject: {
          show: action.show,
          object: action.object,
          url: action.url
          url: action.url,
          showExpiryDate: action.showExpiryDate
        }
      }
    case actionsObjects.CHECKED_LIST_ADD:
@@ -21,3 +21,13 @@ export const getCurrentPrefix = state => state.objects.currentPrefix
export const getCheckedList = state => state.objects.checkedList

export const getPrefixWritable = state => state.objects.prefixWritable

const objectsSelector = state => state.objects.list
const objectsFilterSelector = state => state.objects.filter

export const getFilteredObjects = createSelector(
  objectsSelector,
  objectsFilterSelector,
  (objects, filter) => objects.filter(
    object => object.name.toLowerCase().startsWith(filter.toLowerCase()))
)
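getFilteredObjects is memoized: reselect recomputes the filtered array only when state.objects.list or state.objects.filter changes identity, so unrelated store updates do not re-filter the object list on every render. A minimal standalone sketch of the same pattern (names hypothetical):

import { createSelector } from "reselect"

const itemsSelector = state => state.items
const querySelector = state => state.query

// Recomputed only when items or query change; otherwise the cached
// result is returned, keeping connected components referentially stable.
const getVisibleItems = createSelector(
  itemsSelector,
  querySelector,
  (items, query) =>
    items.filter(item => item.name.toLowerCase().startsWith(query.toLowerCase()))
)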
@@ -36,7 +36,7 @@ export class Dropzone extends React.Component {
    // Overwrite the default styling from react-dropzone; otherwise it
    // won't handle child elements correctly.
    const style = {
      height: "100%",
      flex: "1",
      borderWidth: "0",
      borderStyle: "dashed",
      borderColor: "#fff"

@@ -48,18 +48,29 @@ export class Dropzone extends React.Component {
    const rejectStyle = {
      backgroundColor: "#ffdddd"
    }
    const getStyle = (isDragActive, isDragAccept, isDragReject) => ({
      ...style,
      ...(isDragActive ? activeStyle : {}),
      ...(isDragReject ? rejectStyle : {})
    })

    // disableClick means that it won't trigger a file upload box when
    // the user clicks on a file.
    return (
      <ReactDropzone
        style={style}
        activeStyle={activeStyle}
        rejectStyle={rejectStyle}
        disableClick={true}
        onDrop={this.onDrop.bind(this)}
      >
        {this.props.children}
        {({getRootProps, getInputProps, isDragActive, isDragAccept, isDragReject}) => (
          <div
            {...getRootProps({
              onClick: event => event.stopPropagation()
            })}
            style={getStyle(isDragActive, isDragAccept, isDragReject)}
          >
            <input {...getInputProps()} />
            {this.props.children}
          </div>
        )}
      </ReactDropzone>
    )
  }
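This hunk tracks the react-dropzone 4.x to 11.x API change: the style/activeStyle/rejectStyle/disableClick props are gone, and the component now takes a render function that supplies getRootProps/getInputProps plus drag-state flags. A minimal sketch of the new contract, independent of this component's styling:

import React from "react"
import ReactDropzone from "react-dropzone"

// Minimal render-prop usage; onDrop receives the accepted File objects.
const UploadArea = ({ onDrop }) => (
  <ReactDropzone onDrop={onDrop}>
    {({ getRootProps, getInputProps, isDragActive }) => (
      <div {...getRootProps()}>
        <input {...getInputProps()} />
        {isDragActive ? "Drop files here" : "Drag files here"}
      </div>
    )}
  </ReactDropzone>
)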
@@ -89,11 +89,16 @@ export const uploadFile = file => {
      return
    }
    const currentPrefix = getCurrentPrefix(state)
    const objectName = `${currentPrefix}${file.name}`
    var _filePath = file.path || file.name
    if (_filePath.charAt(0) == '/') {
      _filePath = _filePath.substring(1)
    }
    const filePath = _filePath
    const objectName = `${currentPrefix}${filePath}`
    const uploadUrl = `${
      window.location.origin
    }${minioBrowserPrefix}/upload/${currentBucket}/${objectName}`
    const slug = `${currentBucket}-${currentPrefix}-${file.name}`
    const slug = `${currentBucket}-${currentPrefix}-${filePath}`

    let xhr = new XMLHttpRequest()
    xhr.open("PUT", uploadUrl, true)

@@ -141,7 +146,7 @@ export const uploadFile = file => {
      dispatch(
        alertActions.set({
          type: "success",
          message: "File '" + file.name + "' uploaded successfully."
          message: "File '" + filePath + "' uploaded successfully."
        })
      )
      dispatch(objectsActions.selectPrefix(currentPrefix))

@@ -153,7 +158,7 @@ export const uploadFile = file => {
      dispatch(
        alertActions.set({
          type: "danger",
          message: "Error occurred uploading '" + file.name + "'."
          message: "Error occurred uploading '" + filePath + "'."
        })
      )
    })
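file.path is typically set when a directory is dropped (dropzone-style paths such as '/dir/file.txt'), so the leading slash has to go before the path is appended to the prefix; otherwise the object name would contain a double slash. A sketch of the normalization, assuming those inputs:

// "/photos/cat.png" -> "photos/cat.png"; plain file names pass through.
const normalizeFilePath = (file) => {
  const p = file.path || file.name
  return p.charAt(0) === "/" ? p.substring(1) : p
}
console.log(normalizeFilePath({ path: "/photos/cat.png", name: "cat.png" })) // "photos/cat.png"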
@@ -21,7 +21,7 @@ import storage from 'local-storage-fallback'

class Web {
  constructor(endpoint) {
    const namespace = 'Web'
    const namespace = 'web'
    this.JSONrpc = new JSONrpc({
      endpoint,
      namespace
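Lower-casing the JSON-RPC namespace means every browser call is now addressed as web.MethodName rather than Web.MethodName, matching what the server registers. A sketch of the wire-level effect (the request shape is assumed from JSON-RPC 2.0 conventions, not taken from this diff):

// What a PresignedGet call would look like on the wire after the change.
const rpcRequest = {
  jsonrpc: "2.0",
  method: "web.PresignedGet", // previously "Web.PresignedGet"
  params: { bucket: "media", object: "photos/cat.png", expiry: 95400 },
  id: 1,
}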
@@ -20,7 +20,8 @@
  @media(max-width: @screen-sm-max) {
    padding: 75px 0 80px;
  }

  display: flex;
  flex-direction: column;
  min-height: 100vh;
  overflow: auto;
}

@@ -169,6 +169,24 @@ select.form-control {
  }
}

.ig-search-dark {
  &:before {
    font-family: @font-family-icon;
    font-weight: 900;
    content: '\f002';
    font-size: 15px;
    position: absolute;
    left: 2px;
    top: 8px;
    color: rgba(0, 0, 0, 0.5);
  }

  .ig-text {
    padding-left: 25px;
    .placeholder(rgba(0, 0, 0, 0.5))
  }
}

.ig-search {
  &:before {
    font-family: @font-family-icon;

@@ -270,4 +288,4 @@ select.form-control {
  .set-expire-decrease {
    bottom: -27px;
    .rotate(-180deg);
  }
}

@@ -105,7 +105,7 @@ div.fesl-row {

  .fesl-item-name {
    a {
      cursor: default;
      cursor: pointer;
    }
  }

@@ -114,12 +114,6 @@ div.fesl-row {
  ----------------------------*/
  &[data-type=folder] {
    .list-type(#a1d6dd, '\f07b');

    .fesl-item-name {
      a {
        cursor: pointer;
      }
    }
  }
  &[data-type=pdf] { .list-type(#fa7775, '\f1c1'); }
  &[data-type=zip] { .list-type(#427089, '\f1c6'); }

@@ -355,6 +349,7 @@ div.fesl-row {
    margin: 0;
    height: 100%;
    text-align: right;
    white-space: nowrap;
  }

  .dropdown {

@@ -501,4 +496,4 @@ div.fesl-row {
      .opacity(1);
      right: 0;
    }
  }
}
browser/package-lock.json (generated, 10690 lines) — file diff suppressed because it is too large.
@@ -28,71 +28,70 @@
  },
  "homepage": "https://github.com/minio/minio",
  "devDependencies": {
    "async": "^1.5.2",
    "async": "^3.2.0",
    "babel-cli": "^6.26.0",
    "babel-core": "^6.26.3",
    "babel-jest": "^22.1.0",
    "babel-jest": "^23.6.0",
    "babel-loader": "^7.1.2",
    "babel-plugin-syntax-object-rest-spread": "^6.13.0",
    "babel-plugin-transform-object-rest-spread": "^6.8.0",
    "babel-polyfill": "^6.23.0",
    "babel-plugin-transform-object-rest-spread": "^6.26.0",
    "babel-polyfill": "^6.26.0",
    "babel-preset-es2015": "^6.14.0",
    "babel-preset-react": "^6.11.1",
    "babel-register": "^6.26.0",
    "copy-webpack-plugin": "^4.6.0",
    "css-loader": "^0.23.1",
    "enzyme": "^3.10.0",
    "enzyme-adapter-react-16": "^1.1.1",
    "esformatter": "^0.10.0",
    "esformatter-jsx": "^7.4.1",
    "copy-webpack-plugin": "^6.0.1",
    "css-loader": "^3.5.3",
    "enzyme": "^3.11.0",
    "enzyme-adapter-react-16": "^1.15.2",
    "esformatter": "^0.11.3",
    "esformatter-jsx": "^8.0.1",
    "esformatter-jsx-ignore": "^1.0.6",
    "html-webpack-plugin": "^3.2.0",
    "jest": "^22.1.4",
    "jest-enzyme": "^4.0.2",
    "json-loader": "^0.5.4",
    "less": "^3.9.0",
    "less-loader": "^4.1.0",
    "purgecss-webpack-plugin": "^1.4.0",
    "style-loader": "^0.13.1",
    "url-loader": "^0.5.7",
    "webpack-cli": "^3.2.0",
    "webpack-dev-server": "^3.1.14"
    "html-webpack-plugin": "^4.3.0",
    "jest": "^23.6.0",
    "jest-enzyme": "^7.1.2",
    "json-loader": "^0.5.7",
    "less": "^3.11.1",
    "less-loader": "^6.1.0",
    "purgecss-webpack-plugin": "^2.2.0",
    "style-loader": "^1.2.1",
    "url-loader": "^4.1.0",
    "webpack-cli": "^3.3.11",
    "webpack-dev-server": "^3.11.0"
  },
  "dependencies": {
    "@fortawesome/fontawesome-free": "^5.10.0",
    "@fortawesome/fontawesome-free": "^5.13.0",
    "bootstrap": "^3.4.1",
    "classnames": "^2.2.3",
    "core-js": "^3.2.1",
    "expect": "^1.20.2",
    "glob-all": "^3.1.0",
    "history": "^4.7.2",
    "classnames": "^2.2.6",
    "core-js": "^3.6.5",
    "expect": "^26.0.1",
    "glob-all": "^3.2.1",
    "history": "^4.10.1",
    "humanize": "0.0.9",
    "identity-obj-proxy": "^3.0.0",
    "json-loader": "^0.5.4",
    "jwt-decode": "^2.2.0",
    "local-storage-fallback": "^4.0.2",
    "local-storage-fallback": "^4.1.1",
    "material-design-iconic-font": "^2.2.0",
    "mime-db": "^1.25.0",
    "mime-types": "^2.1.13",
    "moment": "^2.24.0",
    "query-string": "^6.8.2",
    "react": "^16.2.0",
    "react-addons-test-utils": "^0.14.8",
    "react-bootstrap": "^0.32.0",
    "react-copy-to-clipboard": "^5.0.1",
    "mime-db": "^1.44.0",
    "mime-types": "^2.1.27",
    "moment": "^2.26.0",
    "query-string": "^6.12.1",
    "react": "^16.13.1",
    "react-addons-test-utils": "^15.6.2",
    "react-bootstrap": "^0.32.4",
    "react-copy-to-clipboard": "^5.0.2",
    "react-custom-scrollbars": "^4.2.1",
    "react-dom": "^16.2.0",
    "react-dropzone": "^4.2.3",
    "react-infinite-scroller": "^1.0.6",
    "react-dom": "^16.13.1",
    "react-dropzone": "^11.0.1",
    "react-infinite-scroller": "^1.2.4",
    "react-onclickout": "^2.0.8",
    "react-redux": "^5.0.6",
    "react-router-dom": "^4.2.0",
    "redux": "^3.7.2",
    "redux-mock-store": "^1.5.1",
    "redux-thunk": "^2.2.0",
    "reselect": "^3.0.1",
    "superagent": "^3.8.2",
    "react-redux": "^5.1.2",
    "react-router-dom": "^5.2.0",
    "redux": "^4.0.5",
    "redux-mock-store": "^1.5.4",
    "redux-thunk": "^2.3.0",
    "reselect": "^4.0.0",
    "superagent": "^5.2.2",
    "superagent-es6-promise": "^1.0.0",
    "webpack": "^4.28.3"
    "webpack": "^4.43.0"
  }
}
File diff suppressed because one or more lines are too long
@@ -73,17 +73,17 @@ var exports = {
  },
  proxy: {
    '/minio/webrpc': {
      target: 'http://localhost:9000',
      secure: false,
      headers: {'Host': "localhost:9000"}
      target: 'http://localhost:9000',
      secure: false,
      headers: {'Host': "localhost:9000"}
    },
    '/minio/upload/*': {
      target: 'http://localhost:9000',
      secure: false
      target: 'http://localhost:9000',
      secure: false
    },
    '/minio/download/*': {
      target: 'http://localhost:9000',
      secure: false
      target: 'http://localhost:9000',
      secure: false
    },
    '/minio/zip': {
      target: 'http://localhost:9000',

@@ -92,7 +92,7 @@ var exports = {
    }
  },
  plugins: [
    new CopyWebpackPlugin([
    new CopyWebpackPlugin({patterns: [
      {from: 'app/css/loader.css'},
      {from: 'app/img/browsers/chrome.png'},
      {from: 'app/img/browsers/firefox.png'},

@@ -102,7 +102,7 @@ var exports = {
      {from: 'app/img/favicon/favicon-32x32.png'},
      {from: 'app/img/favicon/favicon-96x96.png'},
      {from: 'app/index.html'}
    ]),
    ]}),
    new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
    new PurgecssPlugin({
      paths: glob.sync([
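copy-webpack-plugin v6 changed its constructor from a bare array to an options object with a patterns key, which is why both webpack configs wrap the same file list. A minimal sketch of the before/after, assuming only the plugin version changed:

const CopyWebpackPlugin = require("copy-webpack-plugin")

// v4 accepted an array directly:
//   new CopyWebpackPlugin([{ from: "app/index.html" }])
// v6 moves that array under `patterns`:
module.exports = {
  plugins: [
    new CopyWebpackPlugin({ patterns: [{ from: "app/index.html" }] }),
  ],
}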
@@ -67,7 +67,7 @@ var exports = {
    fs: 'empty'
  },
  plugins: [
    new CopyWebpackPlugin([
    new CopyWebpackPlugin({patterns: [
      {from: 'app/css/loader.css'},
      {from: 'app/img/browsers/chrome.png'},
      {from: 'app/img/browsers/firefox.png'},

@@ -77,7 +77,7 @@ var exports = {
      {from: 'app/img/favicon/favicon-32x32.png'},
      {from: 'app/img/favicon/favicon-96x96.png'},
      {from: 'app/index.html'}
    ]),
    ]}),
    new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
    new PurgecssPlugin({
      paths: glob.sync([
@@ -9,7 +9,7 @@ function _init() {
    export CGO_ENABLED=0

    ## List of architectures and OS to test cross compilation.
    SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386"
    SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64"
}

function _build() {
@@ -45,88 +45,63 @@ FUNCTIONAL_TESTS="$WORK_DIR/functional-tests.sh"
function start_minio_fs()
{
    "${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
    minio_pid=$!
    sleep 10

    echo "$minio_pid"
}

function start_minio_erasure()
{
    "${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
    minio_pid=$!
    sleep 15

    echo "$minio_pid"
}

function start_minio_erasure_sets()
{
    "${MINIO[@]}" server "${WORK_DIR}/erasure-disk-sets{1...32}" >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
    minio_pid=$!
    sleep 15

    echo "$minio_pid"
}

function start_minio_zone_erasure_sets()
{
    declare -a minio_pids
    export MINIO_ACCESS_KEY=$ACCESS_KEY
    export MINIO_SECRET_KEY=$SECRET_KEY

    "${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
    minio_pids[0]=$!

    "${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
    minio_pids[1]=$!

    sleep 40
    echo "${minio_pids[@]}"
}

function start_minio_zone_erasure_sets_ipv6()
{
    declare -a minio_pids
    export MINIO_ACCESS_KEY=$ACCESS_KEY
    export MINIO_SECRET_KEY=$SECRET_KEY

    "${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
    minio_pids[0]=$!

    "${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
    minio_pids[1]=$!
    "${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-ipv6-9000.log" 2>&1 &
    "${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-ipv6-9001.log" 2>&1 &

    sleep 40
    echo "${minio_pids[@]}"
}

function start_minio_dist_erasure()
{
    declare -a minio_pids
    export MINIO_ACCESS_KEY=$ACCESS_KEY
    export MINIO_SECRET_KEY=$SECRET_KEY
    "${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
    minio_pids[0]=$!
    "${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
    minio_pids[1]=$!
    "${MINIO[@]}" server --address=:9002 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9002.log" 2>&1 &
    minio_pids[2]=$!
    "${MINIO[@]}" server --address=:9003 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9003.log" 2>&1 &
    minio_pids[3]=$!

    sleep 40
    echo "${minio_pids[@]}"
}

function run_test_fs()
{
    minio_pid="$(start_minio_fs)"
    start_minio_fs

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

    kill "$minio_pid"
    pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then

@@ -138,12 +113,12 @@ function run_test_fs()
}

function run_test_erasure_sets() {
    minio_pid="$(start_minio_erasure_sets)"
    start_minio_erasure_sets

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

    kill "$minio_pid"
    pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then

@@ -154,44 +129,14 @@ function run_test_erasure_sets() {
    return "$rv"
}

function run_test_dist_erasure_sets_ipv6()
{
    minio_pids=( $(start_minio_dist_erasure_sets_ipv6) )

    export SERVER_ENDPOINT="[::1]:9000"

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

    for pid in "${minio_pids[@]}"; do
        kill "$pid"
    done
    sleep 3

    if [ "$rv" -ne 0 ]; then
        for i in $(seq 0 9); do
            echo "server$i log:"
            cat "$WORK_DIR/dist-minio-v6-900$i.log"
        done
    fi

    for i in $(seq 0 9); do
        rm -f "$WORK_DIR/dist-minio-v6-900$i.log"
    done

    return "$rv"
}

function run_test_zone_erasure_sets()
{
    minio_pids=( $(start_minio_zone_erasure_sets) )
    start_minio_zone_erasure_sets

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

    for pid in "${minio_pids[@]}"; do
        kill "$pid"
    done
    pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then

@@ -210,16 +155,14 @@ function run_test_zone_erasure_sets()

function run_test_zone_erasure_sets_ipv6()
{
    minio_pids=( $(start_minio_zone_erasure_sets_ipv6) )
    start_minio_zone_erasure_sets_ipv6

    export SERVER_ENDPOINT="[::1]:9000"

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

    for pid in "${minio_pids[@]}"; do
        kill "$pid"
    done
    pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then

@@ -238,12 +181,12 @@ function run_test_zone_erasure_sets_ipv6()

function run_test_erasure()
{
    minio_pid="$(start_minio_erasure)"
    start_minio_erasure

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

    kill "$minio_pid"
    pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then

@@ -256,14 +199,12 @@ function run_test_erasure()

function run_test_dist_erasure()
{
    minio_pids=( $(start_minio_dist_erasure) )
    start_minio_dist_erasure

    (cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
    rv=$?

    for pid in "${minio_pids[@]}"; do
        kill "$pid"
    done
    pkill minio
    sleep 3

    if [ "$rv" -ne 0 ]; then
@@ -33,6 +33,7 @@ function start_minio_3_node() {
    declare -a ARGS
    export MINIO_ACCESS_KEY=minio
    export MINIO_SECRET_KEY=minio123
    export MINIO_ERASURE_SET_DRIVE_COUNT=6

    start_port=$(shuf -i 10000-65000 -n 1)
    for i in $(seq 1 3); do
@@ -100,18 +100,18 @@ func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.
    }

    if len(acl.AccessControlList.Grants) == 0 {
        writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
        writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
        return
    }

    if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
        writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
        writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
        return
    }
    }

    if aclHeader != "" && aclHeader != "private" {
        writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
        writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
        return
    }

@@ -159,6 +159,7 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
        },
        Permission: "FULL_CONTROL",
    })

    if err := xml.NewEncoder(w).Encode(acl); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return

@@ -214,18 +215,18 @@ func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.
    }

    if len(acl.AccessControlList.Grants) == 0 {
        writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
        writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
        return
    }

    if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
        writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
        writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
        return
    }
    }

    if aclHeader != "" && aclHeader != "private" {
        writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
        writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
        return
    }
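The ACL handlers keep accepting the degenerate private/FULL_CONTROL case but now reject everything else with NotImplemented instead of errInvalidArgument, telling clients the feature is unsupported rather than that their request was malformed. A hedged client-side sketch of a request that would now get that answer (the x-amz-acl header is standard S3; endpoint, bucket name, and request signing are illustrative and elided):

// Asking for a public-read ACL on a MinIO bucket should now yield an
// S3 "NotImplemented" error response rather than "InvalidArgument".
const res = await fetch("https://minio.example.com/mybucket/?acl", {
  method: "PUT",
  headers: { "x-amz-acl": "public-read" }, // anything but "private"
})
console.log(res.status) // expected: 501 Not Implemented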
cmd/admin-bucket-handlers.go (new file, 299 lines)
@@ -0,0 +1,299 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
    "encoding/json"
    "io"
    "io/ioutil"
    "net/http"

    "github.com/gorilla/mux"
    "github.com/minio/minio/cmd/config"
    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/env"
    iampolicy "github.com/minio/minio/pkg/iam/policy"
    "github.com/minio/minio/pkg/madmin"
)

const (
    bucketQuotaConfigFile = "quota.json"
    bucketTargetsFile     = "bucket-targets.json"
)

// PutBucketQuotaConfigHandler - PUT Bucket quota configuration.
// ----------
// Places a quota configuration on the specified bucket. The quota
// specified in the quota configuration will be applied by default
// to enforce total quota for the specified bucket.
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "PutBucketQuotaConfig")

    defer logger.AuditLog(w, r, "PutBucketQuotaConfig", mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    vars := mux.Vars(r)
    bucket := vars["bucket"]

    // Turn off quota commands if data usage info is unavailable.
    if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminBucketQuotaDisabled), r.URL)
        return
    }

    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    data, err := ioutil.ReadAll(r.Body)
    if err != nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
        return
    }

    if _, err = parseBucketQuota(bucket, data); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

    if err = globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, data); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

    // Write success response.
    writeSuccessResponseHeadersOnly(w)
}

// GetBucketQuotaConfigHandler - gets bucket quota configuration
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetBucketQuotaConfig")

    defer logger.AuditLog(w, r, "GetBucketQuotaConfig", mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    vars := mux.Vars(r)
    bucket := vars["bucket"]
    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    config, err := globalBucketMetadataSys.GetQuotaConfig(bucket)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

    configData, err := json.Marshal(config)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }

    // Write success response.
    writeSuccessResponseJSON(w, configData)
}

// SetRemoteTargetHandler - sets a remote target for bucket
func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "SetBucketTarget")

    defer logger.AuditLog(w, r, "SetBucketTarget", mustGetClaimsFromToken(r))
    vars := mux.Vars(r)
    bucket := vars["bucket"]
    update := r.URL.Query().Get("update") == "true"

    if !globalIsErasure {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
        return
    }

    // Get current object layer instance.
    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.SetBucketTargetAction)
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    // Check if bucket exists.
    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    cred, _, _, s3Err := validateAdminSignature(ctx, r, "")
    if s3Err != ErrNone {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
        return
    }
    password := cred.SecretKey

    reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
    if err != nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
        return
    }
    var target madmin.BucketTarget
    if err = json.Unmarshal(reqBytes, &target); err != nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
        return
    }

    sameTarget, _ := isLocalHost(target.URL().Hostname(), target.URL().Port(), globalMinioPort)
    if sameTarget && bucket == target.TargetBucket {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBucketRemoteIdenticalToSource), r.URL)
        return
    }

    target.SourceBucket = bucket
    if !update {
        target.Arn = globalBucketTargetSys.getRemoteARN(bucket, &target)
    }
    if target.Arn == "" {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
        return
    }

    if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, update); err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }
    tgtBytes, err := json.Marshal(&targets)
    if err != nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
        return
    }

    if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

    data, err := json.Marshal(target.Arn)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    // Write success response.
    writeSuccessResponseJSON(w, data)
}

// ListRemoteTargetsHandler - lists remote target(s) for a bucket or gets a target
// for a particular ARN type
func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ListBucketTargets")

    defer logger.AuditLog(w, r, "ListBucketTargets", mustGetClaimsFromToken(r))
    vars := mux.Vars(r)
    bucket := vars["bucket"]
    arnType := vars["type"]
    if !globalIsErasure {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
        return
    }
    // Get current object layer instance.
    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketTargetAction)
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }
    if bucket != "" {
        // Check if bucket exists.
        if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
            writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
            return
        }
        if _, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket); err != nil {
            writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
            return
        }
    }
    targets := globalBucketTargetSys.ListTargets(ctx, bucket, arnType)
    data, err := json.Marshal(targets)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    // Write success response.
    writeSuccessResponseJSON(w, data)
}

// RemoveRemoteTargetHandler - removes a remote target for bucket with specified ARN
func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "RemoveBucketTarget")

    defer logger.AuditLog(w, r, "RemoveBucketTarget", mustGetClaimsFromToken(r))
    vars := mux.Vars(r)
    bucket := vars["bucket"]
    arn := vars["arn"]

    if !globalIsErasure {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
        return
    }
    // Get current object layer instance.
    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.SetBucketTargetAction)
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
    }

    // Check if bucket exists.
    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    if err := globalBucketTargetSys.RemoveTarget(ctx, bucket, arn); err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }
    tgtBytes, err := json.Marshal(&targets)
    if err != nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
        return
    }
    if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

    // Write success response.
    writeSuccessNoContent(w)
}
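The quota handler reads the raw request body, validates it with parseBucketQuota, and persists it verbatim as quota.json in the bucket metadata. A hedged sketch of the JSON a client might PUT (the field names are assumed from madmin.BucketQuota and the admin route is likewise an assumption; neither appears in this diff):

// Hypothetical payload for PutBucketQuotaConfig: a 10 GiB hard quota.
const quotaConfig = {
  quota: 10 * 1024 * 1024 * 1024, // bytes
  quotatype: "hard",
}
// Sent, after admin request signing (elided here), to something like:
// PUT /minio/admin/v3/set-bucket-quota?bucket=mybucket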
@@ -42,7 +42,7 @@ import (

func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) (auth.Credentials, ObjectLayer) {
    // Get current object layer instance.
    objectAPI := newObjectLayerWithoutSafeModeFn()
    objectAPI := newObjectLayerFn()
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return auth.Credentials{}, nil

@@ -60,7 +60,9 @@ func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *htt

// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "DelConfigKVHandler")
    ctx := newContext(r, w, "DeleteConfigKV")

    defer logger.AuditLog(w, r, "DeleteConfigKV", mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -100,7 +102,9 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ

// SetConfigKVHandler - PUT /minio/admin/v3/set-config-kv
func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "SetConfigKVHandler")
    ctx := newContext(r, w, "SetConfigKV")

    defer logger.AuditLog(w, r, "SetConfigKV", mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -132,7 +136,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
        return
    }

    if err = validateConfig(cfg); err != nil {
    if err = validateConfig(cfg, objectAPI.SetDriveCount()); err != nil {
        writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
        return
    }

@@ -159,7 +163,9 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ

// GetConfigKVHandler - GET /minio/admin/v3/get-config-kv?key={key}
func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetConfigKVHandler")
    ctx := newContext(r, w, "GetConfigKV")

    defer logger.AuditLog(w, r, "GetConfigKV", mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -167,7 +173,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
    }

    cfg := globalServerConfig
    if globalSafeMode {
    if newObjectLayerFn() == nil {
        var err error
        cfg, err = getValidConfig(objectAPI)
        if err != nil {

@@ -195,7 +201,9 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
}

func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ClearConfigHistoryKVHandler")
    ctx := newContext(r, w, "ClearConfigHistoryKV")

    defer logger.AuditLog(w, r, "ClearConfigHistoryKV", mustGetClaimsFromToken(r))

    _, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -230,7 +238,9 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *

// RestoreConfigHistoryKVHandler - restores a config with KV settings for the given KV id.
func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "RestoreConfigHistoryKVHandler")
    ctx := newContext(r, w, "RestoreConfigHistoryKV")

    defer logger.AuditLog(w, r, "RestoreConfigHistoryKV", mustGetClaimsFromToken(r))

    _, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -261,7 +271,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
        return
    }

    if err = validateConfig(cfg); err != nil {
    if err = validateConfig(cfg, objectAPI.SetDriveCount()); err != nil {
        writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
        return
    }

@@ -276,7 +286,9 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r

// ListConfigHistoryKVHandler - lists all the KV ids.
func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "ListConfigHistoryKVHandler")
    ctx := newContext(r, w, "ListConfigHistoryKV")

    defer logger.AuditLog(w, r, "ListConfigHistoryKV", mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -314,7 +326,9 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h

// HelpConfigKVHandler - GET /minio/admin/v3/help-config-kv?subSys={subSys}&key={key}
func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "HelpConfigKVHandler")
    ctx := newContext(r, w, "HelpConfigKV")

    defer logger.AuditLog(w, r, "HelpHistoryKV", mustGetClaimsFromToken(r))

    _, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -340,7 +354,9 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req

// SetConfigHandler - PUT /minio/admin/v3/config
func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "SetConfigHandler")
    ctx := newContext(r, w, "SetConfig")

    defer logger.AuditLog(w, r, "SetConfig", mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {

@@ -367,7 +383,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
        return
    }

    if err = validateConfig(cfg); err != nil {
    if err = validateConfig(cfg, objectAPI.SetDriveCount()); err != nil {
        writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
        return
    }

@@ -395,7 +411,9 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
// GetConfigHandler - GET /minio/admin/v3/config
// Get config.json of this minio setup.
func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetConfigHandler")
    ctx := newContext(r, w, "GetConfig")

    defer logger.AuditLog(w, r, "GetConfig", mustGetClaimsFromToken(r))

    cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
    if objectAPI == nil {
@@ -35,8 +35,8 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
|
||||
var adminAPIErr APIErrorCode
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return nil, cred
|
||||
}
|
||||
@@ -55,6 +55,8 @@ func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.R
|
||||
func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "RemoveUser")
|
||||
|
||||
defer logger.AuditLog(w, r, "RemoveUser", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeleteUserAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
@@ -91,6 +93,8 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
|
||||
func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListUsers")
|
||||
|
||||
defer logger.AuditLog(w, r, "ListUsers", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
@@ -123,14 +127,43 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
|
||||
func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetUserInfo")
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetUserAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
defer logger.AuditLog(w, r, "GetUserInfo", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
name := vars["accessKey"]
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
|
||||
if s3Err != ErrNone {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
accessKey := cred.AccessKey
|
||||
if cred.ParentUser != "" {
|
||||
accessKey = cred.ParentUser
|
||||
}
|
||||
|
||||
implicitPerm := name == accessKey
|
||||
if !implicitPerm {
|
||||
if !globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: accessKey,
|
||||
Action: iampolicy.GetUserAdminAction,
|
||||
ConditionValues: getConditionValues(r, "", accessKey, claims),
|
||||
IsOwner: owner,
|
||||
Claims: claims,
|
||||
}) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
userInfo, err := globalIAMSys.GetUserInfo(name)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
@@ -150,6 +183,8 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
|
||||
func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "UpdateGroupMembers")
|
||||
|
||||
defer logger.AuditLog(w, r, "UpdateGroupMembers", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AddUserToGroupAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
@@ -193,6 +228,8 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
|
||||
func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetGroup")
|
||||
|
||||
defer logger.AuditLog(w, r, "GetGroup", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetGroupAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
@@ -220,6 +257,8 @@ func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
|
||||
func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListGroups")
|
||||
|
||||
defer logger.AuditLog(w, r, "ListGroups", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListGroupsAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
@@ -244,6 +283,8 @@ func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
|
||||
func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "SetGroupStatus")
|
||||
|
||||
defer logger.AuditLog(w, r, "SetGroupStatus", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableGroupAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
@@ -279,6 +320,8 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
|
||||
func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "SetUserStatus")
|
||||
|
||||
defer logger.AuditLog(w, r, "SetUserStatus", mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableUserAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
@@ -288,7 +331,7 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
|
||||
accessKey := vars["accessKey"]
|
||||
status := vars["status"]
|
||||
|
||||
// Custom IAM policies not allowed for admin user.
|
||||
// This API is not allowed to lookup accessKey user status
|
||||
if accessKey == globalActiveCred.AccessKey {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
@@ -312,20 +355,49 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
|
||||
func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "AddUser")
|
||||
|
||||
objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.CreateUserAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
defer logger.AuditLog(w, r, "AddUser", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
accessKey := vars["accessKey"]
|
||||
|
||||
// Custom IAM policies not allowed for admin user.
|
||||
if accessKey == globalActiveCred.AccessKey {
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
|
||||
if s3Err != ErrNone {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if cred.IsTemp() || cred.IsServiceAccount() {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccountNotEligible), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Not allowed to add a user with same access key as root credential
|
||||
if owner && accessKey == cred.AccessKey {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
implicitPerm := accessKey == cred.AccessKey
|
||||
if !implicitPerm {
|
||||
if !globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: accessKey,
|
||||
Action: iampolicy.CreateUserAdminAction,
|
||||
ConditionValues: getConditionValues(r, "", accessKey, claims),
|
||||
IsOwner: owner,
|
||||
Claims: claims,
|
||||
}) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
|
||||
// More than maxConfigSize bytes were available
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
|
||||
@@ -365,9 +437,11 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
|
||||
func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "AddServiceAccount")
|
||||
|
||||
defer logger.AuditLog(w, r, "AddServiceAccount", mustGetClaimsFromToken(r))
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -378,6 +452,12 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
|
||||
return
|
||||
}
|
||||
|
||||
// Disallow creating service accounts by root user.
|
||||
if owner {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
password := cred.SecretKey
|
||||
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
@@ -391,12 +471,6 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
|
||||
return
|
||||
}
|
||||
|
||||
// Disallow creating service accounts by root user.
|
||||
if owner {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
parentUser := cred.AccessKey
|
||||
if cred.ParentUser != "" {
|
||||
parentUser = cred.ParentUser
|
||||
@@ -442,9 +516,11 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
|
||||
func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListServiceAccounts")
|
||||
|
||||
defer logger.AuditLog(w, r, "ListServiceAccounts", mustGetClaimsFromToken(r))
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -461,7 +537,12 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
|
||||
return
|
||||
}
|
||||
|
||||
serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, cred.AccessKey)
|
||||
parentUser := cred.AccessKey
|
||||
if cred.ParentUser != "" {
|
||||
parentUser = cred.ParentUser
|
||||
}
|
||||
|
||||
serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, parentUser)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
@@ -490,9 +571,11 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteServiceAccount")

defer logger.AuditLog(w, r, "DeleteServiceAccount", mustGetClaimsFromToken(r))

// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
@@ -521,8 +604,15 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
return
}

if cred.AccessKey != user || cred.ParentUser != user {
// The service account belongs to another user but return not found error to mitigate brute force attacks.
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}

if parentUser != user || user == "" {
// The service account belongs to another user but return not
// found error to mitigate brute force attacks. or the
// serviceAccount doesn't exist.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServiceAccountNotFound), r.URL)
return
}
@@ -536,10 +626,126 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
writeSuccessNoContent(w)
}

// AccountInfoHandler returns usage
func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AccountInfo")

defer logger.AuditLog(w, r, "AccountInfo", mustGetClaimsFromToken(r))

// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}

// Set prefix value for "s3:prefix" policy conditionals.
r.Header.Set("prefix", "")

// Set delimiter value for "s3:delimiter" policy conditionals.
r.Header.Set("delimiter", SlashSeparator)

isAllowedAccess := func(bucketName string) (rd, wr bool) {
// Use the following trick to filter in place
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
rd = true
}

if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.PutObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
wr = true
}

return rd, wr
}

buckets, err := objectAPI.ListBuckets(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

// Load the latest calculated data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil {
// log the error, continue with the accounting response
logger.LogIf(ctx, err)
}

accountName := cred.AccessKey
if cred.ParentUser != "" {
accountName = cred.ParentUser
}

policies, err := globalIAMSys.PolicyDBGet(accountName, false)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

acctInfo := madmin.AccountInfo{
AccountName: accountName,
Policy: globalIAMSys.GetCombinedPolicy(policies...),
}

for _, bucket := range buckets {
rd, wr := isAllowedAccess(bucket.Name)
if rd || wr {
var size uint64
// Fetch the data usage of the current bucket
if !dataUsageInfo.LastUpdate.IsZero() {
size = dataUsageInfo.BucketsUsage[bucket.Name].Size
}
acctInfo.Buckets = append(acctInfo.Buckets, madmin.BucketUsageInfo{
Name: bucket.Name,
Created: bucket.Created,
Size: size,
Access: madmin.AccountAccess{
Read: rd,
Write: wr,
},
})
}
}

usageInfoJSON, err := json.Marshal(acctInfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

writeSuccessResponseJSON(w, usageInfoJSON)
}

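Note: the per-bucket access reported by AccountInfoHandler above reduces to two IsAllowed probes, ListBucket for read and PutObject for write. A minimal, self-contained sketch of that pattern follows; the PolicyChecker interface and action strings are hypothetical stand-ins for globalIAMSys and iampolicy, not MinIO's API.

package main

import "fmt"

// PolicyChecker is a hypothetical stand-in for globalIAMSys.IsAllowed.
type PolicyChecker interface {
    IsAllowed(account, action, bucket string) bool
}

type allowAll struct{}

func (allowAll) IsAllowed(account, action, bucket string) bool { return true }

// bucketAccess mirrors the rd/wr probe in AccountInfoHandler: a bucket is
// reported readable if ListBucket is allowed and writable if PutObject is.
func bucketAccess(c PolicyChecker, account, bucket string) (rd, wr bool) {
    rd = c.IsAllowed(account, "s3:ListBucket", bucket)
    wr = c.IsAllowed(account, "s3:PutObject", bucket)
    return rd, wr
}

func main() {
    rd, wr := bucketAccess(allowAll{}, "minio-user", "photos")
    fmt.Printf("read=%v write=%v\n", rd, wr)
}
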
// InfoCannedPolicyV2 - GET /minio/admin/v2/info-canned-policy?name={policyName}
func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicyV2")

defer logger.AuditLog(w, r, "InfoCannedPolicyV2", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
return
@@ -565,6 +771,8 @@ func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicy")

defer logger.AuditLog(w, r, "InfoCannedPolicy", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
return
@@ -576,7 +784,10 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
return
}

json.NewEncoder(w).Encode(policy)
if err = json.NewEncoder(w).Encode(policy); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
}

@@ -584,6 +795,8 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPoliciesV2")

defer logger.AuditLog(w, r, "ListCannedPoliciesV2", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
@@ -616,6 +829,8 @@ func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Re
func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPolicies")

defer logger.AuditLog(w, r, "ListCannedPolicies", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
@@ -648,6 +863,8 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveCannedPolicy")

defer logger.AuditLog(w, r, "RemoveCannedPolicy", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeletePolicyAdminAction)
if objectAPI == nil {
return
@@ -674,6 +891,8 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddCannedPolicy")

defer logger.AuditLog(w, r, "AddCannedPolicy", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.CreatePolicyAdminAction)
if objectAPI == nil {
return
@@ -724,6 +943,8 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetPolicyForUserOrGroup")

defer logger.AuditLog(w, r, "SetPolicyForUserOrGroup", mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AttachPolicyAdminAction)
if objectAPI == nil {
return

File diff suppressed because it is too large
@@ -33,27 +33,27 @@ import (
"github.com/minio/minio/pkg/madmin"
)

// adminXLTestBed - encapsulates subsystems that need to be setup for
// adminErasureTestBed - encapsulates subsystems that need to be setup for
// admin-handler unit tests.
type adminXLTestBed struct {
xlDirs []string
objLayer ObjectLayer
router *mux.Router
type adminErasureTestBed struct {
erasureDirs []string
objLayer ObjectLayer
router *mux.Router
}

// prepareAdminXLTestBed - helper function that setups a single-node
// XL backend for admin-handler tests.
func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) {
// prepareAdminErasureTestBed - helper function that setups a single-node
// Erasure backend for admin-handler tests.
func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, error) {

// reset global variables to start afresh.
resetTestGlobals()

// Set globalIsXL to indicate that the setup uses an erasure
// Set globalIsErasure to indicate that the setup uses an erasure
// code backend.
globalIsXL = true
globalIsErasure = true

// Initializing objectLayer for HealFormatHandler.
objLayer, xlDirs, xlErr := initTestXLObjLayer(ctx)
objLayer, erasureDirs, xlErr := initTestErasureObjLayer(ctx)
if xlErr != nil {
return nil, xlErr
}
@@ -66,58 +66,47 @@ func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) {
// Initialize boot time
globalBootTime = UTCNow()

globalEndpoints = mustGetZoneEndpoints(xlDirs...)
globalEndpoints = mustGetZoneEndpoints(erasureDirs...)

globalConfigSys = NewConfigSys()
newAllSubsystems()

globalIAMSys = NewIAMSys()
globalIAMSys.Init(ctx, objLayer)

buckets, err := objLayer.ListBuckets(ctx)
if err != nil {
return nil, err
}

globalPolicySys = NewPolicySys()
globalPolicySys.Init(buckets, objLayer)

globalNotificationSys = NewNotificationSys(globalEndpoints)
globalNotificationSys.Init(buckets, objLayer)
initAllSubsystems(ctx, objLayer)

// Setup admin mgmt REST API handlers.
adminRouter := mux.NewRouter()
registerAdminRouter(adminRouter, true, true)

return &adminXLTestBed{
xlDirs: xlDirs,
objLayer: objLayer,
router: adminRouter,
return &adminErasureTestBed{
erasureDirs: erasureDirs,
objLayer: objLayer,
router: adminRouter,
}, nil
}

// TearDown - method that resets the test bed for subsequent unit
// tests to start afresh.
func (atb *adminXLTestBed) TearDown() {
removeRoots(atb.xlDirs)
func (atb *adminErasureTestBed) TearDown() {
removeRoots(atb.erasureDirs)
resetTestGlobals()
}

// initTestObjLayer - Helper function to initialize an XL-based object
// initTestObjLayer - Helper function to initialize an Erasure-based object
// layer and set globalObjectAPI.
func initTestXLObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
xlDirs, err := getRandomDisks(16)
func initTestErasureObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
erasureDirs, err := getRandomDisks(16)
if err != nil {
return nil, nil, err
}
endpoints := mustGetNewEndpoints(xlDirs...)
storageDisks, format, err := waitForFormatXL(true, endpoints, 1, 1, 16, "")
endpoints := mustGetNewEndpoints(erasureDirs...)
storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "")
if err != nil {
removeRoots(xlDirs)
removeRoots(erasureDirs)
return nil, nil, err
}

globalPolicySys = NewPolicySys()
objLayer, err := newXLSets(ctx, endpoints, storageDisks, format)
objLayer := &erasureServerPools{serverPools: make([]*erasureSets, 1)}
objLayer.serverPools[0], err = newErasureSets(ctx, endpoints, storageDisks, format)
if err != nil {
return nil, nil, err
}
@@ -126,7 +115,7 @@ func initTestXLObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
globalObjLayerMutex.Lock()
globalObjectAPI = objLayer
globalObjLayerMutex.Unlock()
return objLayer, xlDirs, nil
return objLayer, erasureDirs, nil
}

// cmdType - Represents different service subcomands like status, stop
@@ -195,9 +184,9 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

adminTestBed, err := prepareAdminXLTestBed(ctx)
adminTestBed, err := prepareAdminErasureTestBed(ctx)
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.")
}
defer adminTestBed.TearDown()

@@ -266,9 +255,9 @@ func TestAdminServerInfo(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

adminTestBed, err := prepareAdminXLTestBed(ctx)
adminTestBed, err := prepareAdminErasureTestBed(ctx)
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.")
}

defer adminTestBed.TearDown()
@@ -310,7 +299,7 @@ func TestToAdminAPIErrCode(t *testing.T) {
}{
// 1. Server not in quorum.
{
err: errXLWriteQuorum,
err: errErasureWriteQuorum,
expectedAPIErr: ErrAdminConfigNoQuorum,
},
// 2. No error.
@@ -338,13 +327,13 @@ func TestExtractHealInitParams(t *testing.T) {
mkParams := func(clientToken string, forceStart, forceStop bool) url.Values {
v := url.Values{}
if clientToken != "" {
v.Add(string(mgmtClientToken), clientToken)
v.Add(mgmtClientToken, clientToken)
}
if forceStart {
v.Add(string(mgmtForceStart), "")
v.Add(mgmtForceStart, "")
}
if forceStop {
v.Add(string(mgmtForceStop), "")
v.Add(mgmtForceStop, "")
}
return v
}
@@ -362,11 +351,11 @@ func TestExtractHealInitParams(t *testing.T) {
}
varsArr := []map[string]string{
// Invalid cases
{string(mgmtPrefix): "objprefix"},
{mgmtPrefix: "objprefix"},
// Valid cases
{},
{string(mgmtBucket): "bucket"},
{string(mgmtBucket): "bucket", string(mgmtPrefix): "objprefix"},
{mgmtBucket: "bucket"},
{mgmtBucket: "bucket", mgmtPrefix: "objprefix"},
}

// Body is always valid - we do not test JSON decoding.

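Note: the test diff above drops the string(...) conversions around the mgmt* constants, which only compiles if those constants changed from a defined string type to plain string constants. A minimal sketch of why, with illustrative names:

package main

import (
    "fmt"
    "net/url"
)

// With a defined type, a conversion is required before the constant can be
// used as a plain string key (the old string(mgmtClientToken) form).
type mgmtQueryKey string

const typedToken mgmtQueryKey = "clientToken"

// An untyped string constant can be passed to url.Values directly, which is
// what the updated test code does.
const plainToken = "clientToken"

func main() {
    v := url.Values{}
    v.Add(string(typedToken), "abc") // conversion needed for the typed constant
    v.Add(plainToken, "xyz")         // no conversion for the untyped constant
    fmt.Println(v.Encode())
}
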
@@ -21,7 +21,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -60,12 +59,11 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long")
|
||||
errHealPushStopNDiscard = fmt.Errorf("heal push stopped due to heal stop signal")
|
||||
errHealStopSignalled = fmt.Errorf("heal stop signaled")
|
||||
errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long")
|
||||
errHealStopSignalled = fmt.Errorf("heal stop signaled")
|
||||
|
||||
errFnHealFromAPIErr = func(ctx context.Context, err error) error {
|
||||
apiErr := toAPIError(ctx, err)
|
||||
apiErr := toAdminAPIErr(ctx, err)
|
||||
return fmt.Errorf("Heal internal error: %s: %s",
|
||||
apiErr.Code, apiErr.Description)
|
||||
}
|
||||
@@ -73,18 +71,11 @@ var (
|
||||
|
||||
// healSequenceStatus - accumulated status of the heal sequence
|
||||
type healSequenceStatus struct {
|
||||
// lock to update this structure as it is concurrently
|
||||
// accessed
|
||||
updateLock *sync.RWMutex
|
||||
|
||||
// summary and detail for failures
|
||||
Summary healStatusSummary `json:"Summary"`
|
||||
FailureDetail string `json:"Detail,omitempty"`
|
||||
StartTime time.Time `json:"StartTime"`
|
||||
|
||||
// disk information
|
||||
NumDisks int `json:"NumDisks"`
|
||||
|
||||
// settings for the heal sequence
|
||||
HealSettings madmin.HealOpts `json:"Settings"`
|
||||
|
||||
@@ -94,16 +85,18 @@ type healSequenceStatus struct {
|
||||
|
||||
// structure to hold state of all heal sequences in server memory
|
||||
type allHealState struct {
|
||||
sync.Mutex
|
||||
sync.RWMutex
|
||||
|
||||
// map of heal path to heal sequence
|
||||
healSeqMap map[string]*healSequence
|
||||
healSeqMap map[string]*healSequence
|
||||
healLocalDisks map[Endpoint]struct{}
|
||||
}
|
||||
|
||||
// initHealState - initialize healing apparatus
|
||||
func initHealState() *allHealState {
|
||||
// newHealState - initialize global heal state management
|
||||
func newHealState() *allHealState {
|
||||
healState := &allHealState{
|
||||
healSeqMap: make(map[string]*healSequence),
|
||||
healSeqMap: make(map[string]*healSequence),
|
||||
healLocalDisks: map[Endpoint]struct{}{},
|
||||
}
|
||||
|
||||
go healState.periodicHealSeqsClean(GlobalContext)
|
||||
@@ -111,14 +104,48 @@ func initHealState() *allHealState {
|
||||
return healState
|
||||
}
|
||||
|
||||
func (ahs *allHealState) healDriveCount() int {
|
||||
ahs.RLock()
|
||||
defer ahs.RUnlock()
|
||||
|
||||
return len(ahs.healLocalDisks)
|
||||
}
|
||||
|
||||
func (ahs *allHealState) getHealLocalDisks() Endpoints {
|
||||
ahs.RLock()
|
||||
defer ahs.RUnlock()
|
||||
|
||||
var endpoints Endpoints
|
||||
for ep := range ahs.healLocalDisks {
|
||||
endpoints = append(endpoints, ep)
|
||||
}
|
||||
return endpoints
|
||||
}
|
||||
|
||||
func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
|
||||
ahs.Lock()
|
||||
defer ahs.Unlock()
|
||||
|
||||
for _, ep := range healLocalDisks {
|
||||
delete(ahs.healLocalDisks, ep)
|
||||
}
|
||||
}
|
||||
|
||||
func (ahs *allHealState) pushHealLocalDisks(healLocalDisks ...Endpoint) {
|
||||
ahs.Lock()
|
||||
defer ahs.Unlock()
|
||||
|
||||
for _, ep := range healLocalDisks {
|
||||
ahs.healLocalDisks[ep] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
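Note: healLocalDisks above is a set keyed by Endpoint, guarded by the embedded RWMutex. A minimal sketch of the same push/pop/snapshot pattern, with plain strings standing in for the Endpoint type:

package main

import (
    "fmt"
    "sync"
)

// endpointSet mirrors allHealState.healLocalDisks: a mutex-guarded set with
// push, pop and snapshot operations.
type endpointSet struct {
    sync.RWMutex
    members map[string]struct{}
}

func (s *endpointSet) push(eps ...string) {
    s.Lock()
    defer s.Unlock()
    for _, ep := range eps {
        s.members[ep] = struct{}{}
    }
}

func (s *endpointSet) pop(eps ...string) {
    s.Lock()
    defer s.Unlock()
    for _, ep := range eps {
        delete(s.members, ep)
    }
}

// snapshot copies the set into a slice under the read lock so callers can
// iterate without holding the lock - the same reason getHealLocalDisks
// returns an Endpoints slice rather than the map itself.
func (s *endpointSet) snapshot() []string {
    s.RLock()
    defer s.RUnlock()
    out := make([]string, 0, len(s.members))
    for ep := range s.members {
        out = append(out, ep)
    }
    return out
}

func main() {
    s := &endpointSet{members: map[string]struct{}{}}
    s.push("/disk1", "/disk2")
    s.pop("/disk1")
    fmt.Println(s.snapshot()) // [/disk2]
}
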
func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
// Launch clean-up routine to remove this heal sequence (after
// it ends) from the global state after timeout has elapsed.
ticker := time.NewTicker(time.Minute * 5)
defer ticker.Stop()
for {
select {
case <-ticker.C:
case <-time.After(time.Minute * 5):
now := UTCNow()
ahs.Lock()
for path, h := range ahs.healSeqMap {
@@ -162,12 +189,17 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
he, exists := ahs.getHealSequence(path)
if !exists {
hsp = madmin.HealStopSuccess{
ClientToken: "invalid",
ClientToken: "unknown",
StartTime: UTCNow(),
}
} else {
clientToken := he.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s@%d", he.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
}

hsp = madmin.HealStopSuccess{
ClientToken: he.clientToken,
ClientToken: clientToken,
ClientAddress: he.clientAddress,
StartTime: he.startTime,
}
@@ -196,26 +228,17 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
// `keepHealSeqStateDuration`. This function also launches a
// background routine to clean up heal results after the
// aforementioned duration.
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLayer) (
respBytes []byte, apiErr APIError, errMsg string) {

existsAndLive := false
he, exists := ahs.getHealSequence(h.path)
if exists {
if !he.hasEnded() || len(he.currentStatus.Items) > 0 {
existsAndLive = true
if h.forceStarted {
_, apiErr = ahs.stopHealSequence(pathJoin(h.bucket, h.object))
if apiErr.Code != "" {
return respBytes, apiErr, ""
}
}

if existsAndLive {
// A heal sequence exists on the given path.
if h.forceStarted {
// stop the running heal sequence - wait for it to finish.
he.stop()
for !he.hasEnded() {
time.Sleep(1 * time.Second)
}
} else {
} else {
oh, exists := ahs.getHealSequence(pathJoin(h.bucket, h.object))
if exists && !oh.hasEnded() {
errMsg = "Heal is already running on the given path " +
"(use force-start option to stop and start afresh). " +
fmt.Sprintf("The heal was started by IP %s at %s, token is %s",
@@ -229,10 +252,9 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (

// Check if new heal sequence to be started overlaps with any
// existing, running sequence
hpath := pathJoin(h.bucket, h.object)
for k, hSeq := range ahs.healSeqMap {
if !hSeq.hasEnded() && (strings.HasPrefix(k, h.path) ||
strings.HasPrefix(h.path, k)) {

if !hSeq.hasEnded() && (HasPrefix(k, hpath) || HasPrefix(hpath, k)) {
errMsg = "The provided heal sequence path overlaps with an existing " +
fmt.Sprintf("heal path: %s", k)
return nil, errorCodes.ToAPIErr(ErrHealOverlappingPaths), errMsg
@@ -240,19 +262,24 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
}

// Add heal state and start sequence
ahs.healSeqMap[h.path] = h
ahs.healSeqMap[hpath] = h

// Launch top-level background heal go-routine
go h.healSequenceStart()
go h.healSequenceStart(objAPI)

clientToken := h.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s@%d", h.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
}

b, err := json.Marshal(madmin.HealStartSuccess{
ClientToken: h.clientToken,
ClientToken: clientToken,
ClientAddress: h.clientAddress,
StartTime: h.startTime,
})
if err != nil {
logger.LogIf(h.ctx, err)
return nil, toAPIError(h.ctx, err), ""
return nil, toAdminAPIErr(h.ctx, err), ""
}
return b, noError, ""
}
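Note: in distributed setups the code above encodes the local proxy-endpoint index into the returned client token as "uuid@index". A hedged sketch of undoing that encoding; splitClientToken is a hypothetical helper, the only fact taken from the source is the fmt.Sprintf("%s@%d", ...) format.

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// splitClientToken undoes the "%s@%d" encoding used above in distributed
// setups. A missing "@" simply means the token was issued locally.
func splitClientToken(s string) (token string, proxyIdx int, err error) {
    i := strings.LastIndex(s, "@")
    if i < 0 {
        return s, -1, nil // not proxied
    }
    idx, err := strconv.Atoi(s[i+1:])
    if err != nil {
        return "", 0, fmt.Errorf("bad proxy index in token %q: %w", s, err)
    }
    return s[:i], idx, nil
}

func main() {
    tok, idx, err := splitClientToken("6f2f8f6b-uuid@3")
    fmt.Println(tok, idx, err) // 6f2f8f6b-uuid 3 <nil>
}
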
@@ -261,14 +288,17 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
// status results from global state and returns its JSON
// representation. The clientToken helps ensure there aren't
// conflicting clients fetching status.
func (ahs *allHealState) PopHealStatusJSON(path string,
func (ahs *allHealState) PopHealStatusJSON(hpath string,
clientToken string) ([]byte, APIErrorCode) {

// fetch heal state for given path
h, exists := ahs.getHealSequence(path)
h, exists := ahs.getHealSequence(hpath)
if !exists {
// If there is no such heal sequence, return error.
return nil, ErrHealNoSuchProcess
// heal sequence doesn't exist, must have finished.
jbytes, err := json.Marshal(healSequenceStatus{
Summary: healFinishedStatus,
})
return jbytes, toAdminAPIErrCode(GlobalContext, err)
}

// Check if client-token is valid
@@ -277,8 +307,8 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
}

// Take lock to access and update the heal-sequence
h.currentStatus.updateLock.Lock()
defer h.currentStatus.updateLock.Unlock()
h.mutex.Lock()
defer h.mutex.Unlock()

numItems := len(h.currentStatus.Items)

@@ -289,41 +319,41 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
lastResultIndex = h.currentStatus.Items[numItems-1].ResultIndex
}

// After sending status to client, and before relinquishing
// the updateLock, reset Item to nil and record the result
// index sent to the client.
defer func(i int64) {
h.lastSentResultIndex = i
h.currentStatus.Items = nil
}(lastResultIndex)
h.lastSentResultIndex = lastResultIndex

jbytes, err := json.Marshal(h.currentStatus)
if err != nil {
h.currentStatus.Items = nil

logger.LogIf(h.ctx, err)
return nil, ErrInternalError
}

h.currentStatus.Items = nil

return jbytes, ErrNone
}

// healSource denotes single entity and heal option.
type healSource struct {
path string // entity path (format, buckets, objects) to heal
opts *madmin.HealOpts // optional heal option overrides default setting
bucket string
object string
versionID string
opts *madmin.HealOpts // optional heal option overrides default setting
}

// healSequence - state for each heal sequence initiated on the
// server.
type healSequence struct {
// bucket, and prefix on which heal seq. was initiated
bucket, objPrefix string
// bucket, and object on which heal seq. was initiated
bucket, object string

// path is just pathJoin(bucket, objPrefix)
path string

// List of entities (format, buckets, objects) to heal
// A channel of entities (format, buckets, objects) to heal
sourceCh chan healSource

// A channel of entities with heal result
respCh chan healResult

// Report healing progress
reportProgress bool

@@ -349,9 +379,8 @@ type healSequence struct {
// completed
traverseAndHealDoneCh chan error

// channel to signal heal sequence to stop (e.g. from the
// heal-stop API)
stopSignalCh chan struct{}
// canceler to cancel heal sequence.
cancelCtx context.CancelFunc

// the last result index sent to client
lastSentResultIndex int64
@@ -377,31 +406,31 @@ type healSequence struct {

// NewHealSequence - creates healSettings, assumes bucket and
// objPrefix are already validated.
func newHealSequence(bucket, objPrefix, clientAddr string,
numDisks int, hs madmin.HealOpts, forceStart bool) *healSequence {
func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
hs madmin.HealOpts, forceStart bool) *healSequence {

reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
reqInfo.AppendTags("prefix", objPrefix)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
ctx, cancel := context.WithCancel(logger.SetReqInfo(ctx, reqInfo))

clientToken := mustGetUUID()

return &healSequence{
respCh: make(chan healResult),
bucket: bucket,
objPrefix: objPrefix,
path: pathJoin(bucket, objPrefix),
object: objPrefix,
reportProgress: true,
startTime: UTCNow(),
clientToken: mustGetUUID(),
clientToken: clientToken,
clientAddress: clientAddr,
forceStarted: forceStart,
settings: hs,
currentStatus: healSequenceStatus{
Summary: healNotStartedStatus,
HealSettings: hs,
NumDisks: numDisks,
updateLock: &sync.RWMutex{},
},
traverseAndHealDoneCh: make(chan error),
stopSignalCh: make(chan struct{}),
cancelCtx: cancel,
ctx: ctx,
scannedItemsMap: make(map[madmin.HealItemType]int64),
healedItemsMap: make(map[madmin.HealItemType]int64),
@@ -484,7 +513,7 @@ func (h *healSequence) gethealFailedItemsMap() map[string]int64 {
// external signal)
func (h *healSequence) isQuitting() bool {
select {
case <-h.stopSignalCh:
case <-h.ctx.Done():
return true
default:
return false
@@ -493,19 +522,18 @@ func (h *healSequence) isQuitting() bool {

// check if the heal sequence has ended
func (h *healSequence) hasEnded() bool {
h.currentStatus.updateLock.RLock()
summary := h.currentStatus.Summary
h.currentStatus.updateLock.RUnlock()
return summary == healStoppedStatus || summary == healFinishedStatus
h.mutex.RLock()
defer h.mutex.RUnlock()
// background heal never ends
if h.clientToken == bgHealingUUID {
return false
}
return !h.endTime.IsZero()
}

// stops the heal sequence - safe to call multiple times.
func (h *healSequence) stop() {
select {
case <-h.stopSignalCh:
default:
close(h.stopSignalCh)
}
h.cancelCtx()
}

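Note: the old stop() relies on the select-with-default idiom to make closing stopSignalCh safe to repeat, while the new code gets the same idempotence from context.CancelFunc. A minimal sketch of both:

package main

import (
    "context"
    "fmt"
)

// closeOnce closes ch only if it is not already closed - the select/default
// idiom the old stop() used. Note this is only safe when a single goroutine
// may close the channel; concurrent closers still race.
func closeOnce(ch chan struct{}) {
    select {
    case <-ch:
        // already closed
    default:
        close(ch)
    }
}

func main() {
    ch := make(chan struct{})
    closeOnce(ch)
    closeOnce(ch) // safe: second call is a no-op

    // context.CancelFunc is idempotent by contract, which is why the new
    // heal sequence can simply call h.cancelCtx() unconditionally.
    ctx, cancel := context.WithCancel(context.Background())
    cancel()
    cancel() // also a no-op
    fmt.Println(ctx.Err()) // context canceled
}
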
// pushHealResultItem - pushes a heal result item for consumption in
@@ -532,29 +560,27 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {

var itemsLen int
for {
h.currentStatus.updateLock.Lock()
h.mutex.Lock()
itemsLen = len(h.currentStatus.Items)
if itemsLen == maxUnconsumedHealResultItems {
// unlock and wait to check again if we can push
h.currentStatus.updateLock.Unlock()

// wait for a second, or quit if an external
// stop signal is received or the
// unconsumedTimer fires.
select {
// Check after a second
case <-time.After(time.Second):
h.mutex.Unlock()
continue

case <-h.stopSignalCh:
case <-h.ctx.Done():
h.mutex.Unlock()
// discard result and return.
return errHealPushStopNDiscard
return errHealStopSignalled

// Timeout if no results consumed for too
// long.
// Timeout if no results consumed for too long.
case <-unconsumedTimer.C:
h.mutex.Unlock()
return errHealIdleTimeout

}
}
break
@@ -571,13 +597,7 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
h.currentStatus.Items = append(h.currentStatus.Items, r)

// release lock
h.currentStatus.updateLock.Unlock()

// This is a "safe" point for the heal sequence to quit if
// signaled externally.
if h.isQuitting() {
return errHealStopSignalled
}
h.mutex.Unlock()

return nil
}
@@ -589,40 +609,41 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
// routine for completion, and (2) listens for external stop
// signals. When either event happens, it sets the finish status for
// the heal-sequence.
func (h *healSequence) healSequenceStart() {
func (h *healSequence) healSequenceStart(objAPI ObjectLayer) {
// Set status as running
h.currentStatus.updateLock.Lock()
h.mutex.Lock()
h.currentStatus.Summary = healRunningStatus
h.currentStatus.StartTime = UTCNow()
h.currentStatus.updateLock.Unlock()
h.mutex.Unlock()

if h.sourceCh == nil {
go h.traverseAndHeal()
go h.traverseAndHeal(objAPI)
} else {
go h.healFromSourceCh()
}

select {
case err, ok := <-h.traverseAndHealDoneCh:
if !ok {
return
}
h.mutex.Lock()
h.endTime = UTCNow()
h.currentStatus.updateLock.Lock()
defer h.currentStatus.updateLock.Unlock()
// Heal traversal is complete.
if ok {
if err == nil {
// heal traversal succeeded.
h.currentStatus.Summary = healFinishedStatus
} else {
// heal traversal had an error.
h.currentStatus.Summary = healStoppedStatus
h.currentStatus.FailureDetail = err.Error()
} else {
// heal traversal succeeded.
h.currentStatus.Summary = healFinishedStatus
}

case <-h.stopSignalCh:
h.mutex.Unlock()
case <-h.ctx.Done():
h.mutex.Lock()
h.endTime = UTCNow()
h.currentStatus.updateLock.Lock()
h.currentStatus.Summary = healStoppedStatus
h.currentStatus.FailureDetail = errHealStopSignalled.Error()
h.currentStatus.updateLock.Unlock()
h.currentStatus.Summary = healFinishedStatus
h.mutex.Unlock()

// drain traverse channel so the traversal
// go-routine does not leak.
@@ -636,84 +657,114 @@ func (h *healSequence) healSequenceStart() {
}

func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
var respCh = make(chan healResult)
defer close(respCh)
opts := globalHealConfig

// Send heal request
task := healTask{
path: source.path,
responseCh: respCh,
bucket: source.bucket,
object: source.object,
versionID: source.versionID,
opts: h.settings,
responseCh: h.respCh,
}
if source.opts != nil {
task.opts = *source.opts
} else {
if opts.Bitrot {
task.opts.ScanMode = madmin.HealDeepScan
}
}
globalBackgroundHealRoutine.queueHealTask(task)
// Wait for answer and push result to the client
res := <-respCh
if !h.reportProgress {
h.mutex.Lock()
defer h.mutex.Unlock()

// Progress is not reported in case of background heal processing.
// Instead we increment relevant counter based on the heal result
// for prometheus reporting.
if res.err != nil && !isErrObjectNotFound(res.err) {
for _, d := range res.result.After.Drives {
// For failed items we report the endpoint and drive state
// This will help users take corrective actions for drives
h.healFailedItemsMap[d.Endpoint+","+d.State]++
// Wait and proceed if there are active requests
waitForLowHTTPReq(opts.IOCount, opts.Sleep)

h.mutex.Lock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
h.mutex.Unlock()

globalBackgroundHealRoutine.queueHealTask(task)

select {
case res := <-h.respCh:
if !h.reportProgress {
// Object might have been deleted, by the time heal
// was attempted, we should ignore this object and
// return the error and not calculate this object
// as part of the metrics.
if isErrObjectNotFound(res.err) || isErrVersionNotFound(res.err) {
// Return the error so that caller can handle it.
return res.err
}
} else {
// Only object type reported for successful healing
h.healedItemsMap[res.result.Type]++
}
return nil
}
res.result.Type = healType
if res.err != nil {
// Object might have been deleted, by the time heal
// was attempted, we should ignore this object and return success.
if isErrObjectNotFound(res.err) {
return nil
}
// Only report object error
if healType != madmin.HealItemObject {

h.mutex.Lock()
defer h.mutex.Unlock()

// Progress is not reported in case of background heal processing.
// Instead we increment relevant counter based on the heal result
// for prometheus reporting.
if res.err != nil {
for _, d := range res.result.After.Drives {
// For failed items we report the endpoint and drive state
// This will help users take corrective actions for drives
h.healFailedItemsMap[d.Endpoint+","+d.State]++
}
} else {
// Only object type reported for successful healing
h.healedItemsMap[res.result.Type]++
}

// Report caller of any failure
return res.err
}
res.result.Detail = res.err.Error()
res.result.Type = healType
if res.err != nil {
// Object might have been deleted, by the time heal
// was attempted, we should ignore this object and return success.
if isErrObjectNotFound(res.err) || isErrVersionNotFound(res.err) {
return nil
}
// Only report object error
if healType != madmin.HealItemObject {
return res.err
}
res.result.Detail = res.err.Error()
}
return h.pushHealResultItem(res.result)
case <-h.ctx.Done():
return nil
}
return h.pushHealResultItem(res.result)
}

func (h *healSequence) healItemsFromSourceCh() error {
bucketsOnly := true // heal buckets only, not objects.
if err := h.healItems(bucketsOnly); err != nil {
logger.LogIf(h.ctx, err)
}

for {
select {
case source := <-h.sourceCh:
case source, ok := <-h.sourceCh:
if !ok {
return nil
}
var itemType madmin.HealItemType
switch {
case source.path == nopHeal:
case source.bucket == nopHeal:
continue
case source.path == SlashSeparator:
case source.bucket == SlashSeparator:
itemType = madmin.HealItemMetadata
case !strings.Contains(source.path, SlashSeparator):
case source.bucket != "" && source.object == "":
itemType = madmin.HealItemBucket
default:
itemType = madmin.HealItemObject
}

if err := h.queueHealTask(source, itemType); err != nil {
logger.LogIf(h.ctx, err)
switch err.(type) {
case ObjectExistsAsDirectory:
case ObjectNotFound:
case VersionNotFound:
default:
logger.LogIf(h.ctx, fmt.Errorf("Heal attempt failed for %s: %w",
pathJoin(source.bucket, source.object), err))
}
}

h.scannedItemsMap[itemType]++
h.lastHealActivity = UTCNow()
case <-h.traverseAndHealDoneCh:
return nil
case <-h.ctx.Done():
return nil
}
@@ -722,27 +773,25 @@ func (h *healSequence) healItemsFromSourceCh() error {

func (h *healSequence) healFromSourceCh() {
h.healItemsFromSourceCh()
close(h.traverseAndHealDoneCh)
}

func (h *healSequence) healItems(bucketsOnly bool) error {
// Start with format healing
if err := h.healDiskFormat(); err != nil {
return err
}

func (h *healSequence) healDiskMeta(objAPI ObjectLayer) error {
// Start healing the config prefix.
if err := h.healMinioSysMeta(minioConfigPrefix)(); err != nil {
if err := h.healMinioSysMeta(objAPI, minioConfigPrefix)(); err != nil {
return err
}

// Start healing the bucket config prefix.
if err := h.healMinioSysMeta(bucketConfigPrefix)(); err != nil {
return h.healMinioSysMeta(objAPI, bucketConfigPrefix)()
}

func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error {
if err := h.healDiskMeta(objAPI); err != nil {
return err
}

// Heal buckets and objects
return h.healBuckets(bucketsOnly)
return h.healBuckets(objAPI, bucketsOnly)
}

// traverseAndHeal - traverses on-disk data and performs healing
@@ -752,43 +801,35 @@ func (h *healSequence) healItems(bucketsOnly bool) error {
// quit signal is received, this routine cannot quit immediately and
// has to wait until a safe point is reached, such as between scanning
// two objects.
func (h *healSequence) traverseAndHeal() {
func (h *healSequence) traverseAndHeal(objAPI ObjectLayer) {
bucketsOnly := false // Heals buckets and objects also.
if err := h.healItems(bucketsOnly); err != nil {
if h.isQuitting() {
err = errHealStopSignalled
}
h.traverseAndHealDoneCh <- err
}

h.traverseAndHealDoneCh <- h.healItems(objAPI, bucketsOnly)
close(h.traverseAndHealDoneCh)
}

// healMinioSysMeta - heals all files under a given meta prefix, returns a function
// which in-turn heals the respective meta directory path and any files in int.
func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) func() error {
return func() error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}

// NOTE: Healing on meta is run regardless
// of any bucket being selected, this is to ensure that
// meta are always upto date and correct.
return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket string, object string) error {
return objAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string) error {
if h.isQuitting() {
return errHealStopSignalled
}

herr := h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemBucketMetadata)
err := h.queueHealTask(healSource{
bucket: bucket,
object: object,
versionID: versionID,
}, madmin.HealItemBucketMetadata)
// Object might have been deleted, by the time heal
// was attempted we ignore this object an move on.
if isErrObjectNotFound(herr) {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return nil
}
return herr
return err
})
}
}
@@ -800,39 +841,27 @@ func (h *healSequence) healDiskFormat() error {
return errHealStopSignalled
}

// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}

return h.queueHealTask(healSource{path: SlashSeparator}, madmin.HealItemMetadata)
return h.queueHealTask(healSource{bucket: SlashSeparator}, madmin.HealItemMetadata)
}

// healBuckets - check for all buckets heal or just particular bucket.
func (h *healSequence) healBuckets(bucketsOnly bool) error {
func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
if h.isQuitting() {
return errHealStopSignalled
}

// 1. If a bucket was specified, heal only the bucket.
if h.bucket != "" {
return h.healBucket(h.bucket, bucketsOnly)
return h.healBucket(objAPI, h.bucket, bucketsOnly)
}

// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}

buckets, err := objectAPI.ListBucketsHeal(h.ctx)
buckets, err := objAPI.ListBucketsHeal(h.ctx)
if err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}

for _, bucket := range buckets {
if err = h.healBucket(bucket.Name, bucketsOnly); err != nil {
if err = h.healBucket(objAPI, bucket.Name, bucketsOnly); err != nil {
return err
}
}
@@ -841,15 +870,11 @@ func (h *healSequence) healBuckets(bucketsOnly bool) error {
}

// healBucket - traverses and heals given bucket
func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}

if err := h.queueHealTask(healSource{path: bucket}, madmin.HealItemBucket); err != nil {
return err
func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly bool) error {
if err := h.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket); err != nil {
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return err
}
}

if bucketsOnly {
@@ -857,12 +882,15 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
}

if !h.settings.Recursive {
if h.objPrefix != "" {
if h.object != "" {
// Check if an object named as the objPrefix exists,
// and if so heal it.
_, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix, ObjectOptions{})
oi, err := objAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
if err == nil {
if err = h.healObject(bucket, h.objPrefix); err != nil {
if err = h.healObject(bucket, h.object, oi.VersionID); err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return nil
}
return err
}
}
@@ -871,23 +899,26 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
return nil
}

if err := objectAPI.HealObjects(h.ctx, bucket, h.objPrefix, h.settings, h.healObject); err != nil {
return errFnHealFromAPIErr(h.ctx, err)
if err := objAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
// Object might have been deleted, by the time heal
// was attempted we ignore this object an move on.
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return errFnHealFromAPIErr(h.ctx, err)
}
}
return nil
}

// healObject - heal the given object and record result
func (h *healSequence) healObject(bucket, object string) error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}

func (h *healSequence) healObject(bucket, object, versionID string) error {
if h.isQuitting() {
return errHealStopSignalled
}

return h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemObject)
err := h.queueHealTask(healSource{
bucket: bucket,
object: object,
versionID: versionID,
}, madmin.HealItemObject)
return err
}

@@ -20,6 +20,8 @@ import (
"net/http"

"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
)

@@ -50,7 +52,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)

for _, adminVersion := range adminVersions {
// Restart and stop MinIO service.
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/service").HandlerFunc(httpTraceAll(adminAPI.ServiceActionHandler)).Queries("action", "{action:.*}")
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/service").HandlerFunc(httpTraceAll(adminAPI.ServiceHandler)).Queries("action", "{action:.*}")
// Update MinIO servers.
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update").HandlerFunc(httpTraceAll(adminAPI.ServerUpdateHandler)).Queries("updateURL", "{updateURL:.*}")

@@ -62,9 +64,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
// DataUsageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(httpTraceAll(adminAPI.DataUsageInfoHandler))

adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountingusageinfo").HandlerFunc(httpTraceAll(adminAPI.AccountingUsageInfoHandler))

if globalIsDistXL || globalIsXL {
if globalIsDistErasure || globalIsErasure {
/// Heal operations

// Heal processing endpoint.
@@ -115,6 +115,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")

// Add user IAM

adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountinfo").HandlerFunc(httpTraceAll(adminAPI.AccountInfoHandler))

adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")

adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
@@ -166,12 +169,33 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)

// Set Group Status
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")

}

if globalIsDistErasure || globalIsErasure {
// Quota operations
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")

// Bucket replication operations
// GetBucketTargetHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
httpTraceHdrs(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
// RemoveRemoteTargetHandler
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
}
}
// -- Top APIs --
// Top locks
if globalIsDistXL {
if globalIsDistErasure {
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
}

@@ -183,26 +207,22 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)

// -- KMS APIs --
//
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/kms/key/create").HandlerFunc(httpTraceAll(adminAPI.KMSCreateKeyHandler)).Queries("key-id", "{key-id:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))

if !globalIsGateway {
// -- OBD API --
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/obdinfo").
HandlerFunc(httpTraceHdrs(adminAPI.OBDInfoHandler)).
Queries("perfdrive", "{perfdrive:true|false}",
"perfnet", "{perfnet:true|false}",
"minioinfo", "{minioinfo:true|false}",
"minioconfig", "{minioconfig:true|false}",
"syscpu", "{syscpu:true|false}",
"sysdiskhw", "{sysdiskhw:true|false}",
"sysosinfo", "{sysosinfo:true|false}",
"sysmem", "{sysmem:true|false}",
"sysprocess", "{sysprocess:true|false}",
)
// Keep obdinfo for backward compatibility with mc
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/obdinfo").
HandlerFunc(httpTraceHdrs(adminAPI.HealthInfoHandler))
// -- Health API --
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
HandlerFunc(httpTraceHdrs(adminAPI.HealthInfoHandler))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/bandwidth").
HandlerFunc(httpTraceHdrs(adminAPI.BandwidthMonitorHandler))
}
}

// If none of the routes match add default error handler routes
adminRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
adminRouter.MethodNotAllowedHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
adminRouter.NotFoundHandler = httpTraceAll(errorResponseHandler)
adminRouter.MethodNotAllowedHandler = httpTraceAll(methodNotAllowedHandler("Admin"))
}

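Note: the registrations above pair a path with gorilla/mux Queries matchers, so a route only matches when the named query parameter is present and its value is captured into mux.Vars. A minimal standalone sketch of that pattern (path and handler are illustrative):

package main

import (
    "fmt"
    "net/http"

    "github.com/gorilla/mux"
)

func main() {
    r := mux.NewRouter()
    // Like the admin routes above: match GET requests on this path only when
    // the "bucket" query parameter is present; capture it into mux.Vars.
    r.Methods(http.MethodGet).
        Path("/minio/admin/v3/get-bucket-quota").
        Queries("bucket", "{bucket:.*}").
        HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
            fmt.Fprintf(w, "quota for bucket %q\n", mux.Vars(req)["bucket"])
        })
    http.ListenAndServe(":8080", r)
}
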
@@ -18,22 +18,19 @@ package cmd

import (
"net/http"
"os"

"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/madmin"
)

// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints from given list of endpoints
func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin.ServerProperties {
var disks []madmin.Disk
func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Request) madmin.ServerProperties {
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
if globalIsDistErasure {
addr = GetLocalPeer(endpointServerPools)
}
network := make(map[string]string)
for _, ep := range endpointZones {
for _, ep := range endpointServerPools {
for _, endpoint := range ep.Endpoints {
nodeName := endpoint.Host
if nodeName == "" {
@@ -42,33 +39,14 @@ func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin
if endpoint.IsLocal {
// Only proceed for local endpoints
network[nodeName] = "online"
var di = madmin.Disk{
DrivePath: endpoint.Path,
}
diInfo, err := disk.GetInfo(endpoint.Path)
if err != nil {
if os.IsNotExist(err) || isSysErrPathNotFound(err) {
di.State = madmin.DriveStateMissing
} else {
di.State = madmin.DriveStateCorrupt
}
continue
}
_, present := network[nodeName]
if !present {
if err := IsServerResolvable(endpoint); err == nil {
network[nodeName] = "online"
} else {
di.State = madmin.DriveStateOk
di.DrivePath = endpoint.Path
di.TotalSpace = diInfo.Total
di.UsedSpace = diInfo.Total - diInfo.Free
di.Utilization = float64((diInfo.Total - diInfo.Free) / diInfo.Total * 100)
}
disks = append(disks, di)
} else {
_, present := network[nodeName]
if !present {
err := IsServerResolvable(endpoint)
if err == nil {
network[nodeName] = "online"
} else {
network[nodeName] = "offline"
}
network[nodeName] = "offline"
}
}
}
@@ -81,6 +59,5 @@ func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin
Version: Version,
CommitID: CommitID,
Network: network,
Disks: disks,
}
}

@@ -18,11 +18,53 @@ package cmd

import (
"encoding/xml"
"time"
)

// ObjectIdentifier carries key name for the object to delete.
type ObjectIdentifier struct {
// DeletedObject objects deleted
type DeletedObject struct {
DeleteMarker bool `xml:"DeleteMarker,omitempty"`
DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
ObjectName string `xml:"Key,omitempty"`
VersionID string `xml:"VersionId,omitempty"`

// MinIO extensions to support delete marker replication
// Replication status of DeleteMarker
DeleteMarkerReplicationStatus string `xml:"DeleteMarkerReplicationStatus,omitempty"`
// MTime of DeleteMarker on source that needs to be propagated to replica
DeleteMarkerMTime DeleteMarkerMTime `xml:"DeleteMarkerMTime,omitempty"`
// Status of versioned delete (of object or DeleteMarker)
VersionPurgeStatus VersionPurgeStatusType `xml:"VersionPurgeStatus,omitempty"`
// PurgeTransitioned is nonempty if object is in transition tier
PurgeTransitioned string `xml:"PurgeTransitioned,omitempty"`
}

// DeleteMarkerMTime is an embedded type containing time.Time for XML marshal
type DeleteMarkerMTime struct {
time.Time
}

// MarshalXML encodes expiration date if it is non-zero and encodes
// empty string otherwise
func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if t.Time.IsZero() {
return nil
}
return e.EncodeElement(t.Time.Format(time.RFC3339), startElement)
}

// ObjectToDelete carries key name for the object to delete.
type ObjectToDelete struct {
ObjectName string `xml:"Key"`
VersionID string `xml:"VersionId"`
// Replication status of DeleteMarker
DeleteMarkerReplicationStatus string `xml:"DeleteMarkerReplicationStatus"`
// Status of versioned delete (of object or DeleteMarker)
VersionPurgeStatus VersionPurgeStatusType `xml:"VersionPurgeStatus"`
// Version ID of delete marker
DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId"`
// PurgeTransitioned is nonempty if object is in transition tier
PurgeTransitioned string `xml:"PurgeTransitioned"`
}

// createBucketConfiguration container for bucket configuration request from client.
@@ -37,5 +79,5 @@ type DeleteObjectsRequest struct {
// Element to enable quiet mode for the request
Quiet bool
// List of objects to be deleted
Objects []ObjectIdentifier `xml:"Object"`
Objects []ObjectToDelete `xml:"Object"`
}

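Note: DeleteMarkerMTime's MarshalXML above writes nothing for the zero time, so the XML element is omitted entirely rather than serialized as an empty value. A runnable sketch of the same pattern with hypothetical type names:

package main

import (
    "encoding/xml"
    "fmt"
    "os"
    "time"
)

// mtime mirrors DeleteMarkerMTime: an embedded time.Time whose MarshalXML
// emits nothing for the zero value, so the element is dropped from output.
type mtime struct {
    time.Time
}

func (t mtime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
    if t.Time.IsZero() {
        return nil // skip the element for the zero value
    }
    return e.EncodeElement(t.Time.Format(time.RFC3339), start)
}

type deleted struct {
    Key   string `xml:"Key"`
    MTime mtime  `xml:"DeleteMarkerMTime"`
}

func main() {
    enc := xml.NewEncoder(os.Stdout)
    enc.Indent("", "  ")
    // Zero time: DeleteMarkerMTime is omitted from the output.
    enc.Encode(deleted{Key: "a"})
    fmt.Println()
    // Non-zero time: the element carries the RFC 3339 representation.
    enc.Encode(deleted{Key: "b", MTime: mtime{time.Now()}})
    fmt.Println()
}
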
@@ -27,15 +27,18 @@ import (
	"github.com/Azure/azure-storage-blob-go/azblob"
	"google.golang.org/api/googleapi"

-	minio "github.com/minio/minio-go/v6"
-	"github.com/minio/minio/cmd/config/etcd/dns"
+	minio "github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/tags"
+	"github.com/minio/minio/cmd/config/dns"
	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/bucket/lifecycle"
	"github.com/minio/minio/pkg/bucket/replication"

	objectlock "github.com/minio/minio/pkg/bucket/object/lock"
	"github.com/minio/minio/pkg/bucket/object/tagging"
	"github.com/minio/minio/pkg/bucket/policy"
	"github.com/minio/minio/pkg/bucket/versioning"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/hash"
)
@@ -100,8 +103,28 @@ const (
	ErrNoSuchBucketLifecycle
	ErrNoSuchLifecycleConfiguration
	ErrNoSuchBucketSSEConfig
	ErrNoSuchCORSConfiguration
	ErrNoSuchWebsiteConfiguration
	ErrReplicationConfigurationNotFoundError
	ErrRemoteDestinationNotFoundError
	ErrReplicationDestinationMissingLock
	ErrRemoteTargetNotFoundError
	ErrReplicationRemoteConnectionError
	ErrBucketRemoteIdenticalToSource
	ErrBucketRemoteAlreadyExists
	ErrBucketRemoteLabelInUse
	ErrBucketRemoteArnTypeInvalid
	ErrBucketRemoteArnInvalid
	ErrBucketRemoteRemoveDisallowed
	ErrRemoteTargetNotVersionedError
	ErrReplicationSourceNotVersionedError
	ErrReplicationNeedsVersioningError
	ErrReplicationBucketNeedsVersioningError
	ErrBucketReplicationDisabledError
	ErrObjectRestoreAlreadyInProgress
	ErrNoSuchKey
	ErrNoSuchUpload
	ErrInvalidVersionID
	ErrNoSuchVersion
	ErrNotImplemented
	ErrPreconditionFailed

@@ -157,6 +180,7 @@ const (
	ErrInvalidRetentionDate
	ErrPastObjectLockRetainDate
	ErrUnknownWORMModeDirective
	ErrBucketTaggingNotFound
	ErrObjectLockInvalidHeaders
	ErrInvalidTagDirective
	// Add new error codes here.

@@ -211,6 +235,7 @@ const (
	ErrInvalidResourceName
	ErrServerNotInitialized
	ErrOperationTimedOut
	ErrClientDisconnected
	ErrOperationMaxedOut
	ErrInvalidRequest
	// MinIO storage class error codes

@@ -235,6 +260,10 @@ const (
	ErrAdminCredentialsMismatch
	ErrInsecureClientRequest
	ErrObjectTampered
	// Bucket Quota error codes
	ErrAdminBucketQuotaExceeded
	ErrAdminNoSuchQuotaConfiguration
	ErrAdminBucketQuotaDisabled

	ErrHealNotImplemented
	ErrHealNoSuchProcess

@@ -335,6 +364,7 @@ const (
	ErrInvalidDecompressedSize
	ErrAddUserInvalidArgument
	ErrAdminAccountNotEligible
	ErrAccountNotEligible
	ErrServiceAccountNotFound
	ErrPostPolicyConditionInvalidFormat
)
@@ -451,7 +481,7 @@ var errorCodes = errorCodeMap{
	},
	ErrInvalidAccessKeyID: {
		Code:           "InvalidAccessKeyId",
-		Description:    "The access key ID you provided does not exist in our records.",
+		Description:    "The Access Key Id you provided does not exist in our records.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrInvalidBucketName: {

@@ -529,9 +559,14 @@ var errorCodes = errorCodeMap{
		Description:    "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrInvalidVersionID: {
		Code:           "InvalidArgument",
		Description:    "Invalid version id specified",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrNoSuchVersion: {
		Code:           "NoSuchVersion",
-		Description:    "Indicates that the version ID specified in the request does not match an existing version.",
+		Description:    "The specified version does not exist.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrNotImplemented: {
@@ -639,20 +674,9 @@ var errorCodes = errorCodeMap{
		Description:    "X-Amz-Date must be in the ISO8601 Long Format \"yyyyMMdd'T'HHmmss'Z'\"",
		HTTPStatusCode: http.StatusBadRequest,
	},
-	// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
-	// right Description: "Error parsing the X-Amz-Credential parameter; incorrect date format \"%s\". This date in the credential must be in the format \"yyyyMMdd\".",
-	// Need changes to make sure variable messages can be constructed.
	ErrMalformedCredentialDate: {
		Code:           "AuthorizationQueryParametersError",
		Description:    "Error parsing the X-Amz-Credential parameter; incorrect date format \"%s\". This date in the credential must be in the format \"yyyyMMdd\".",
		HTTPStatusCode: http.StatusBadRequest,
	},
-	// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
-	// right Description: "Error parsing the X-Amz-Credential parameter; the region 'us-east-' is wrong; expecting 'us-east-1'".
-	// Need changes to make sure variable messages can be constructed.
	ErrMalformedCredentialRegion: {
		Code:           "AuthorizationQueryParametersError",
-		Description:    "Error parsing the X-Amz-Credential parameter; the region is wrong;",
+		Description:    "Error parsing the X-Amz-Credential parameter; incorrect date format. This date in the credential must be in the format \"yyyyMMdd\".",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidRegion: {
@@ -660,9 +684,6 @@ var errorCodes = errorCodeMap{
		Description:    "Region does not match.",
		HTTPStatusCode: http.StatusBadRequest,
	},
-	// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
-	// right Description: "Error parsing the X-Amz-Credential parameter; incorrect service \"s4\". This endpoint belongs to \"s3\".".
-	// Need changes to make sure variable messages can be constructed.
	ErrInvalidServiceS3: {
		Code:        "AuthorizationParametersError",
		Description: "Error parsing the Credential/X-Amz-Credential parameter; incorrect service. This endpoint belongs to \"s3\".",

@@ -673,9 +694,6 @@ var errorCodes = errorCodeMap{
		Description:    "Error parsing the Credential parameter; incorrect service. This endpoint belongs to \"sts\".",
		HTTPStatusCode: http.StatusBadRequest,
	},
-	// FIXME: Should contain the invalid param set as seen in https://github.com/minio/minio/issues/2385.
-	// Description: "Error parsing the X-Amz-Credential parameter; incorrect terminal "aws4_reque". This endpoint uses "aws4_request".
-	// Need changes to make sure variable messages can be constructed.
	ErrInvalidRequestVersion: {
		Code:        "AuthorizationQueryParametersError",
		Description: "Error parsing the X-Amz-Credential parameter; incorrect terminal. This endpoint uses \"aws4_request\".",

@@ -746,8 +764,6 @@ var errorCodes = errorCodeMap{
		Description:    "Your key is too long",
		HTTPStatusCode: http.StatusBadRequest,
	},
-
-	// FIXME: Actual XML error response also contains the header which missed in list of signed header parameters.
	ErrUnsignedHeaders: {
		Code:        "AccessDenied",
		Description: "There were headers present in the request which were not signed",
@@ -773,6 +789,11 @@ var errorCodes = errorCodeMap{
		Description:    "Bucket is missing ObjectLockConfiguration",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBucketTaggingNotFound: {
		Code:           "NoSuchTagSet",
		Description:    "The TagSet does not exist",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrObjectLockConfigurationNotFound: {
		Code:           "ObjectLockConfigurationNotFoundError",
		Description:    "Object Lock configuration does not exist for this bucket",

@@ -783,6 +804,96 @@ var errorCodes = errorCodeMap{
		Description:    "Object Lock configuration cannot be enabled on existing buckets",
		HTTPStatusCode: http.StatusConflict,
	},
	ErrNoSuchCORSConfiguration: {
		Code:           "NoSuchCORSConfiguration",
		Description:    "The CORS configuration does not exist",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrNoSuchWebsiteConfiguration: {
		Code:           "NoSuchWebsiteConfiguration",
		Description:    "The specified bucket does not have a website configuration",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrReplicationConfigurationNotFoundError: {
		Code:           "ReplicationConfigurationNotFoundError",
		Description:    "The replication configuration was not found",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrRemoteDestinationNotFoundError: {
		Code:           "RemoteDestinationNotFoundError",
		Description:    "The remote destination bucket does not exist",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrReplicationDestinationMissingLock: {
		Code:           "ReplicationDestinationMissingLockError",
		Description:    "The replication destination bucket does not have object locking enabled",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrRemoteTargetNotFoundError: {
		Code:           "XMinioAdminRemoteTargetNotFoundError",
		Description:    "The remote target does not exist",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrReplicationRemoteConnectionError: {
		Code:           "XMinioAdminReplicationRemoteConnectionError",
		Description:    "Remote service connection error - please check remote service credentials and target bucket",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrBucketRemoteIdenticalToSource: {
		Code:           "XMinioAdminRemoteIdenticalToSource",
		Description:    "The remote target cannot be identical to source",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBucketRemoteAlreadyExists: {
		Code:           "XMinioAdminBucketRemoteAlreadyExists",
		Description:    "The remote target already exists",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBucketRemoteLabelInUse: {
		Code:           "XMinioAdminBucketRemoteLabelInUse",
		Description:    "The remote target with this label already exists",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBucketRemoteRemoveDisallowed: {
		Code:           "XMinioAdminRemoteRemoveDisallowed",
		Description:    "This ARN is in use by an existing configuration",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBucketRemoteArnTypeInvalid: {
		Code:           "XMinioAdminRemoteARNTypeInvalid",
		Description:    "The bucket remote ARN type is not valid",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBucketRemoteArnInvalid: {
		Code:           "XMinioAdminRemoteArnInvalid",
		Description:    "The bucket remote ARN does not have correct format",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrRemoteTargetNotVersionedError: {
		Code:           "RemoteTargetNotVersionedError",
		Description:    "The remote target does not have versioning enabled",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrReplicationSourceNotVersionedError: {
		Code:           "ReplicationSourceNotVersionedError",
		Description:    "The replication source does not have versioning enabled",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrReplicationNeedsVersioningError: {
		Code:           "InvalidRequest",
		Description:    "Versioning must be 'Enabled' on the bucket to apply a replication configuration",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrReplicationBucketNeedsVersioningError: {
		Code:           "InvalidRequest",
		Description:    "Versioning must be 'Enabled' on the bucket to add a replication target",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBucketReplicationDisabledError: {
		Code:           "XMinioAdminBucketReplicationDisabled",
		Description:    "Replication specified but disk usage crawl is disabled on MinIO server",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrNoSuchObjectLockConfiguration: {
		Code:           "NoSuchObjectLockConfiguration",
		Description:    "The specified object does not have a ObjectLock configuration",

@@ -813,6 +924,11 @@ var errorCodes = errorCodeMap{
		Description:    "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrObjectRestoreAlreadyInProgress: {
		Code:           "RestoreAlreadyInProgress",
		Description:    "Object restore is already in progress",
		HTTPStatusCode: http.StatusConflict,
	},
	/// Bucket notification related errors.
	ErrEventNotification: {
		Code: "InvalidArgument",
@@ -875,7 +991,7 @@ var errorCodes = errorCodeMap{
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMetadataTooLarge: {
-		Code:           "InvalidArgument",
+		Code:           "MetadataTooLarge",
		Description:    "Your metadata headers exceed the maximum allowed metadata size.",
		HTTPStatusCode: http.StatusBadRequest,
	},
@@ -1089,20 +1205,40 @@ var errorCodes = errorCodeMap{
		Description:    "Credentials in config mismatch with server environment variables",
		HTTPStatusCode: http.StatusServiceUnavailable,
	},
	ErrAdminBucketQuotaExceeded: {
		Code:           "XMinioAdminBucketQuotaExceeded",
		Description:    "Bucket quota exceeded",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminNoSuchQuotaConfiguration: {
		Code:           "XMinioAdminNoSuchQuotaConfiguration",
		Description:    "The quota configuration does not exist",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrAdminBucketQuotaDisabled: {
		Code:           "XMinioAdminBucketQuotaDisabled",
		Description:    "Quota specified but disk usage crawl is disabled on MinIO server",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInsecureClientRequest: {
		Code:           "XMinioInsecureClientRequest",
		Description:    "Cannot respond to plain-text request from TLS-encrypted server",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrOperationTimedOut: {
-		Code:           "XMinioServerTimedOut",
-		Description:    "A timeout occurred while trying to lock a resource",
-		HTTPStatusCode: http.StatusRequestTimeout,
+		Code:           "RequestTimeout",
+		Description:    "A timeout occurred while trying to lock a resource, please reduce your request rate",
+		HTTPStatusCode: http.StatusServiceUnavailable,
	},
	ErrClientDisconnected: {
		Code:           "ClientDisconnected",
		Description:    "Client disconnected before response was ready",
		HTTPStatusCode: 499, // No official code, use nginx value.
	},
	ErrOperationMaxedOut: {
-		Code:           "XMinioServerTimedOut",
-		Description:    "A timeout exceeded while waiting to proceed with the request",
-		HTTPStatusCode: http.StatusRequestTimeout,
+		Code:           "SlowDown",
+		Description:    "A timeout exceeded while waiting to proceed with the request, please reduce your request rate",
+		HTTPStatusCode: http.StatusServiceUnavailable,
	},
	ErrUnsupportedMetadata: {
		Code: "InvalidArgument",

@@ -1597,12 +1733,17 @@ var errorCodes = errorCodeMap{
	ErrAddUserInvalidArgument: {
		Code:           "XMinioInvalidIAMCredentials",
		Description:    "User is not allowed to be same as admin access key",
-		HTTPStatusCode: http.StatusConflict,
+		HTTPStatusCode: http.StatusForbidden,
	},
	ErrAdminAccountNotEligible: {
		Code:           "XMinioInvalidIAMCredentials",
		Description:    "The administrator key is not eligible for this operation",
-		HTTPStatusCode: http.StatusConflict,
+		HTTPStatusCode: http.StatusForbidden,
	},
	ErrAccountNotEligible: {
		Code:           "XMinioInvalidIAMCredentials",
		Description:    "The account key is not eligible for this operation",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrServiceAccountNotFound: {
		Code: "XMinioInvalidIAMCredentials",

@@ -1624,7 +1765,17 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
	if err == nil {
		return ErrNone
	}
-	// Verify if the underlying error is signature mismatch.
+
+	// Only return ErrClientDisconnected if the provided context is actually canceled.
+	// This way downstream context.Canceled will still report ErrOperationTimedOut
+	select {
+	case <-ctx.Done():
+		if ctx.Err() == context.Canceled {
+			return ErrClientDisconnected
+		}
+	default:
+	}
+
	switch err {
	case errInvalidArgument:
		apiErr = ErrAdminInvalidArgument
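The select with an empty default is a non-blocking probe of the request context: a caller that hung up short-circuits to ErrClientDisconnected, a still-live context falls through to the ordinary error switch, and a deadline expiry is left to the timeout mapping. A runnable sketch of the same idiom (the function name and return strings are illustrative):

package main

import (
	"context"
	"fmt"
)

// classify distinguishes a caller that hung up (context canceled) from a
// deadline that expired, without blocking when the context is still live.
func classify(ctx context.Context) string {
	select {
	case <-ctx.Done():
		if ctx.Err() == context.Canceled {
			return "client disconnected"
		}
		return "timed out"
	default:
		// Context still active: fall through to normal error mapping.
		return "still running"
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	fmt.Println(classify(ctx)) // still running
	cancel()
	fmt.Println(classify(ctx)) // client disconnected
}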
@@ -1739,6 +1890,12 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrBucketAlreadyOwnedByYou
	case ObjectNotFound:
		apiErr = ErrNoSuchKey
	case MethodNotAllowed:
		apiErr = ErrMethodNotAllowed
	case InvalidVersionID:
		apiErr = ErrInvalidVersionID
	case VersionNotFound:
		apiErr = ErrNoSuchVersion
	case ObjectAlreadyExists:
		apiErr = ErrMethodNotAllowed
	case ObjectNameInvalid:

@@ -1783,6 +1940,38 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrNoSuchLifecycleConfiguration
	case BucketSSEConfigNotFound:
		apiErr = ErrNoSuchBucketSSEConfig
	case BucketTaggingNotFound:
		apiErr = ErrBucketTaggingNotFound
	case BucketObjectLockConfigNotFound:
		apiErr = ErrObjectLockConfigurationNotFound
	case BucketQuotaConfigNotFound:
		apiErr = ErrAdminNoSuchQuotaConfiguration
	case BucketReplicationConfigNotFound:
		apiErr = ErrReplicationConfigurationNotFoundError
	case BucketRemoteDestinationNotFound:
		apiErr = ErrRemoteDestinationNotFoundError
	case BucketReplicationDestinationMissingLock:
		apiErr = ErrReplicationDestinationMissingLock
	case BucketRemoteTargetNotFound:
		apiErr = ErrRemoteTargetNotFoundError
	case BucketRemoteConnectionErr:
		apiErr = ErrReplicationRemoteConnectionError
	case BucketRemoteAlreadyExists:
		apiErr = ErrBucketRemoteAlreadyExists
	case BucketRemoteLabelInUse:
		apiErr = ErrBucketRemoteLabelInUse
	case BucketRemoteArnTypeInvalid:
		apiErr = ErrBucketRemoteArnTypeInvalid
	case BucketRemoteArnInvalid:
		apiErr = ErrBucketRemoteArnInvalid
	case BucketRemoteRemoveDisallowed:
		apiErr = ErrBucketRemoteRemoveDisallowed
	case BucketRemoteTargetNotVersioned:
		apiErr = ErrRemoteTargetNotVersionedError
	case BucketReplicationSourceNotVersioned:
		apiErr = ErrReplicationSourceNotVersionedError
	case BucketQuotaExceeded:
		apiErr = ErrAdminBucketQuotaExceeded
	case *event.ErrInvalidEventName:
		apiErr = ErrEventNotification
	case *event.ErrInvalidARN:

@@ -1811,6 +2000,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrBackendDown
	case ObjectNameTooLong:
		apiErr = ErrKeyTooLongError
	case dns.ErrInvalidBucketName:
		apiErr = ErrInvalidBucketName
	default:
		var ie, iw int
		// This work-around is to handle the issue golang/go#30648

@@ -1847,12 +2038,24 @@ func toAPIError(ctx context.Context, err error) APIError {
	}

	var apiErr = errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
	e, ok := err.(dns.ErrInvalidBucketName)
	if ok {
		code := toAPIErrorCode(ctx, e)
		apiErr = errorCodes.ToAPIErrWithErr(code, e)
	}

	if apiErr.Code == "InternalError" {
		// If we see an internal error try to interpret
		// any underlying errors if possible depending on
		// their internal error types. This code is only
		// useful with gateway implementations.
		switch e := err.(type) {
		case InvalidArgument:
			apiErr = APIError{
				Code:           "InvalidArgument",
				Description:    e.Error(),
				HTTPStatusCode: errorCodes[ErrInvalidRequest].HTTPStatusCode,
			}
		case *xml.SyntaxError:
			apiErr = APIError{
				Code: "MalformedXML",
@@ -1867,13 +2070,25 @@ func toAPIError(ctx context.Context, err error) APIError {
					e.Error()),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case versioning.Error:
			apiErr = APIError{
				Code:           "IllegalVersioningConfigurationException",
				Description:    fmt.Sprintf("Versioning configuration specified in the request is invalid. (%s)", e.Error()),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case lifecycle.Error:
			apiErr = APIError{
				Code:           "InvalidRequest",
				Description:    e.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
-		case tagging.Error:
+		case replication.Error:
+			apiErr = APIError{
+				Code:           "MalformedXML",
+				Description:    e.Error(),
+				HTTPStatusCode: http.StatusBadRequest,
+			}
+		case tags.Error:
			apiErr = APIError{
				Code:        e.Code(),
				Description: e.Error(),

@@ -29,6 +29,7 @@ import (

	"github.com/minio/minio/cmd/crypto"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/pkg/bucket/lifecycle"
)

// Returns a hexadecimal representation of time at the

@@ -37,9 +38,18 @@ func mustGetRequestID(t time.Time) string {
	return fmt.Sprintf("%X", t.UnixNano())
}

// setEventStreamHeaders to allow proxies to avoid buffering proxy responses
func setEventStreamHeaders(w http.ResponseWriter) {
	w.Header().Set(xhttp.ContentType, "text/event-stream")
	w.Header().Set(xhttp.CacheControl, "no-cache") // nginx to turn off buffering
	w.Header().Set("X-Accel-Buffering", "no")      // nginx to turn off buffering
}

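For event streams, the Cache-Control and X-Accel-Buffering headers tell intermediaries (nginx in particular) not to buffer the response, so each event reaches the client as soon as the handler flushes it. A small standalone sketch of how such a handler is typically written with net/http (the endpoint and tick payload are made up):

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func main() {
	http.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) {
		// the same three headers set by the hunk above
		w.Header().Set("Content-Type", "text/event-stream")
		w.Header().Set("Cache-Control", "no-cache")
		w.Header().Set("X-Accel-Buffering", "no") // nginx: do not buffer this response
		f, ok := w.(http.Flusher)
		if !ok {
			http.Error(w, "streaming unsupported", http.StatusInternalServerError)
			return
		}
		for i := 0; i < 3; i++ {
			fmt.Fprintf(w, "data: tick %d\n\n", i)
			f.Flush() // push the event out immediately
			time.Sleep(time.Second)
		}
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}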
// Write http common headers
func setCommonHeaders(w http.ResponseWriter) {
-	w.Header().Set(xhttp.ServerInfo, "MinIO/"+ReleaseTag)
+	// Set the "Server" http header.
+	w.Header().Set(xhttp.ServerInfo, "MinIO")

	// Set `x-amz-bucket-region` only if region is set on the server
	// by default minio uses an empty region.
	if region := globalServerRegion; region != "" {

@@ -68,8 +78,15 @@ func encodeResponseJSON(response interface{}) []byte {
	return bytesBuffer.Bytes()
}

// Write parts count
func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
	if strings.Contains(objInfo.ETag, "-") && len(objInfo.Parts) > 0 {
		w.Header()[xhttp.AmzMpPartsCount] = []string{strconv.Itoa(len(objInfo.Parts))}
	}
}

// Write object header
-func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec) (err error) {
+func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
	// set common headers
	setCommonHeaders(w)

@@ -82,10 +99,6 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
		w.Header()[xhttp.ETag] = []string{"\"" + objInfo.ETag + "\""}
	}

-	if strings.Contains(objInfo.ETag, "-") && len(objInfo.Parts) > 0 {
-		w.Header().Set(xhttp.AmzMpPartsCount, strconv.Itoa(len(objInfo.Parts)))
-	}
-
	if objInfo.ContentType != "" {
		w.Header().Set(xhttp.ContentType, objInfo.ContentType)
	}

@@ -104,42 +117,59 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
	}

	// Set tag count if object has tags
-	tags, _ := url.ParseQuery(objInfo.UserTags)
-	tagCount := len(tags)
-	if tagCount != 0 {
-		w.Header().Set(xhttp.AmzTagCount, strconv.Itoa(tagCount))
+	if len(objInfo.UserTags) > 0 {
+		tags, _ := url.ParseQuery(objInfo.UserTags)
+		if len(tags) > 0 {
+			w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(len(tags))}
+		}
	}

	// Set all other user defined metadata.
	for k, v := range objInfo.UserDefined {
-		if HasPrefix(k, ReservedMetadataPrefix) {
+		if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
			// Do not need to send any internal metadata
			// values to client.
			continue
		}
-		w.Header().Set(k, v)
+
+		// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
+		if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) {
+			continue
+		}
+		var isSet bool
+		for _, userMetadataPrefix := range userMetadataKeyPrefixes {
+			if !strings.HasPrefix(k, userMetadataPrefix) {
+				continue
+			}
+			w.Header()[strings.ToLower(k)] = []string{v}
+			isSet = true
+			break
+		}
+		if !isSet {
+			w.Header().Set(k, v)
+		}
	}

-	var totalObjectSize int64
-	switch {
-	case crypto.IsEncrypted(objInfo.UserDefined):
-		totalObjectSize, err = objInfo.DecryptedSize()
+	var start, rangeLen int64
+	totalObjectSize, err := objInfo.GetActualSize()
	if err != nil {
		return err
	}

+	if opts.PartNumber > 0 {
+		var start, end int64
+		for i := 0; i < len(objInfo.Parts) && i < opts.PartNumber; i++ {
+			start = end
+			end = start + objInfo.Parts[i].ActualSize - 1
+		}
+		rs = &HTTPRangeSpec{Start: start, End: end}
+		rangeLen = end - start + 1
+	} else {
+		// for providing ranged content
+		start, rangeLen, err = rs.GetOffsetLength(totalObjectSize)
+		if err != nil {
+			return err
+		}
+	}
-	case objInfo.IsCompressed():
-		totalObjectSize = objInfo.GetActualSize()
-		if totalObjectSize < 0 {
-			return errInvalidDecompressedSize
-		}
-	default:
-		totalObjectSize = objInfo.Size
-	}
-
-	// for providing ranged content
-	start, rangeLen, err := rs.GetOffsetLength(totalObjectSize)
-	if err != nil {
-		return err
-	}

	// Set content length.
@@ -149,5 +179,31 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
		w.Header().Set(xhttp.ContentRange, contentRange)
	}

	// Set the relevant version ID as part of the response header.
	if objInfo.VersionID != "" {
		w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
	}
	if objInfo.ReplicationStatus.String() != "" {
		w.Header()[xhttp.AmzBucketReplicationStatus] = []string{objInfo.ReplicationStatus.String()}
	}
	if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {
		ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{
			Name:         objInfo.Name,
			UserTags:     objInfo.UserTags,
			VersionID:    objInfo.VersionID,
			ModTime:      objInfo.ModTime,
			IsLatest:     objInfo.IsLatest,
			DeleteMarker: objInfo.DeleteMarker,
		})
		if !expiryTime.IsZero() {
			w.Header()[xhttp.AmzExpiration] = []string{
				fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID),
			}
		}
		if objInfo.TransitionStatus == lifecycle.TransitionComplete {
			w.Header()[xhttp.AmzStorageClass] = []string{objInfo.StorageClass}
		}
	}

	return nil
}

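The opts.PartNumber branch above converts a part number into a byte range by accumulating the recorded per-part sizes. A standalone sketch of that mapping with made-up part sizes (note each part must start one byte past the previous part's end; the helper name is illustrative):

package main

import "fmt"

// partRange returns the byte range covered by 1-based part n,
// given the actual (uncompressed, decrypted) size of each part.
func partRange(sizes []int64, n int) (start, end int64) {
	for i := 0; i < len(sizes) && i < n; i++ {
		if i > 0 {
			start = end + 1 // next part begins right after the previous one
		}
		end = start + sizes[i] - 1
	}
	return start, end
}

func main() {
	sizes := []int64{5 << 20, 5 << 20, 3 << 20} // three parts: 5MiB, 5MiB, 3MiB
	for n := 1; n <= len(sizes); n++ {
		s, e := partRange(sizes, n)
		fmt.Printf("part %d -> bytes %d-%d (len %d)\n", n, s, e, e-s+1)
	}
}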
@@ -20,6 +20,7 @@ import (
	"context"
	"encoding/base64"
	"encoding/xml"
	"fmt"
	"net/http"
	"net/url"
	"path"
@@ -33,10 +34,12 @@ import (
)

const (
-	timeFormatAMZLong = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
-	maxObjectList     = 10000                      // Limit number of objects in a listObjectsResponse.
+	// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
+	iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
+	maxObjectList     = metacacheBlockSize - (metacacheBlockSize / 10) // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
+	maxDeleteList     = 10000 // Limit number of objects deleted in a delete call.
	maxUploadsList    = 10000 // Limit number of uploads in a listUploadsResponse.
	maxPartsList      = 10000 // Limit number of parts in a listPartsResponse.
)

// LocationResponse - format for location response.

@@ -233,10 +236,23 @@ type Bucket struct {

// ObjectVersion container for object version metadata
type ObjectVersion struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Version" json:"-"`
	Object
-	VersionID string `xml:"VersionId"`
+	IsLatest  bool
+	VersionID string `xml:"VersionId"`

	isDeleteMarker bool
}

// MarshalXML - marshal ObjectVersion
func (o ObjectVersion) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if o.isDeleteMarker {
		start.Name.Local = "DeleteMarker"
	} else {
		start.Name.Local = "Version"
	}

	type objectVersionWrapper ObjectVersion
	return e.EncodeElement(objectVersionWrapper(o), start)
}
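Renaming the start element inside MarshalXML is how a single Go type serializes as either <Version> or <DeleteMarker>, and the local wrapper type strips the method set so EncodeElement does not recurse back into the same marshaler. A simplified, runnable sketch (fields trimmed to one; not MinIO's actual type):

package main

import (
	"encoding/xml"
	"fmt"
)

type version struct {
	Key            string `xml:"Key"`
	isDeleteMarker bool
}

func (v version) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if v.isDeleteMarker {
		start.Name.Local = "DeleteMarker"
	} else {
		start.Name.Local = "Version"
	}
	type wrapper version // plain struct: drops the MarshalXML method, so no recursion
	return e.EncodeElement(wrapper(v), start)
}

func main() {
	out, _ := xml.Marshal([]version{
		{Key: "a.txt"},
		{Key: "b.txt", isDeleteMarker: true},
	})
	fmt.Println(string(out))
	// <Version><Key>a.txt</Key></Version><DeleteMarker><Key>b.txt</Key></DeleteMarker>
}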

// StringMap is a map[string]string.

@@ -331,9 +347,10 @@ type CompleteMultipartUploadResponse struct {

// DeleteError structure.
type DeleteError struct {
	Code      string
	Message   string
	Key       string
+	VersionID string `xml:"VersionId"`
}

// DeleteObjectsResponse container for multiple object deletes.
@@ -341,7 +358,7 @@ type DeleteObjectsResponse struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"`

	// Collection of all deleted objects
-	DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"`
+	DeletedObjects []DeletedObject `xml:"Deleted,omitempty"`

	// Collection of errors deleting certain objects.
	Errors []DeleteError `xml:"Error,omitempty"`

@@ -380,8 +397,7 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
	}
	// If domain is set then we need to use bucket DNS style.
	for _, domain := range domains {
-		if strings.Contains(r.Host, domain) {
-			u.Host = bucket + "." + r.Host
+		if strings.HasPrefix(r.Host, bucket+"."+domain) {
			u.Path = path.Join(SlashSeparator, object)
			break
		}

@@ -392,7 +408,7 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
// generates ListBucketsResponse from array of BucketInfo which can be
// serialized to match XML and JSON API spec output.
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
-	var listbuckets []Bucket
+	listbuckets := make([]Bucket, 0, len(buckets))
	var data = ListBucketsResponse{}
	var owner = Owner{}

@@ -400,7 +416,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
	for _, bucket := range buckets {
		var listbucket = Bucket{}
		listbucket.Name = bucket.Name
-		listbucket.CreationDate = bucket.Created.UTC().Format(timeFormatAMZLong)
+		listbucket.CreationDate = bucket.Created.UTC().Format(iso8601TimeFormat)
		listbuckets = append(listbuckets, listbucket)
	}

@@ -411,9 +427,8 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
}

// generates an ListBucketVersions response for the said bucket with other enumerated options.
-func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListVersionsResponse {
-	var versions []ObjectVersion
-	var prefixes []CommonPrefix
+func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
+	versions := make([]ObjectVersion, 0, len(resp.Objects))
	var owner = Owner{}
	var data = ListVersionsResponse{}

@@ -424,7 +439,7 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp
			continue
		}
		content.Key = s3EncodeName(object.Name, encodingType)
-		content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
+		content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
		if object.ETag != "" {
			content.ETag = "\"" + object.ETag + "\""
		}

@@ -434,15 +449,18 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp
		} else {
			content.StorageClass = globalMinioDefaultStorageClass
		}

		content.Owner = owner
-		content.VersionID = "null"
-		content.IsLatest = true
+		content.VersionID = object.VersionID
+		if content.VersionID == "" {
+			content.VersionID = nullVersionID
+		}
+		content.IsLatest = object.IsLatest
+		content.isDeleteMarker = object.DeleteMarker
		versions = append(versions, content)
	}

	data.Name = bucket
	data.Versions = versions

	data.EncodingType = encodingType
	data.Prefix = s3EncodeName(prefix, encodingType)
	data.KeyMarker = s3EncodeName(marker, encodingType)

@@ -450,8 +468,11 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp
	data.MaxKeys = maxKeys

	data.NextKeyMarker = s3EncodeName(resp.NextMarker, encodingType)
+	data.NextVersionIDMarker = resp.NextVersionIDMarker
+	data.VersionIDMarker = versionIDMarker
	data.IsTruncated = resp.IsTruncated

	prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
	for _, prefix := range resp.Prefixes {
		var prefixItem = CommonPrefix{}
		prefixItem.Prefix = s3EncodeName(prefix, encodingType)

@@ -463,8 +484,7 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp

// generates an ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
-	var contents []Object
-	var prefixes []CommonPrefix
+	contents := make([]Object, 0, len(resp.Objects))
	var owner = Owner{}
	var data = ListObjectsResponse{}

@@ -475,7 +495,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
			continue
		}
		content.Key = s3EncodeName(object.Name, encodingType)
-		content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
+		content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
		if object.ETag != "" {
			content.ETag = "\"" + object.ETag + "\""
		}
@@ -496,9 +516,10 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
	data.Marker = s3EncodeName(marker, encodingType)
	data.Delimiter = s3EncodeName(delimiter, encodingType)
	data.MaxKeys = maxKeys

	data.NextMarker = s3EncodeName(resp.NextMarker, encodingType)
	data.IsTruncated = resp.IsTruncated

	prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
	for _, prefix := range resp.Prefixes {
		var prefixItem = CommonPrefix{}
		prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -510,8 +531,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy

// generates an ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
-	var contents []Object
-	var commonPrefixes []CommonPrefix
+	contents := make([]Object, 0, len(objects))
	var owner = Owner{}
	var data = ListObjectsV2Response{}

@@ -525,7 +545,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
			continue
		}
		content.Key = s3EncodeName(object.Name, encodingType)
-		content.LastModified = object.ModTime.UTC().Format(timeFormatAMZLong)
+		content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
		if object.ETag != "" {
			content.ETag = "\"" + object.ETag + "\""
		}

@@ -539,11 +559,15 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
		if metadata {
			content.UserMetadata = make(StringMap)
			for k, v := range CleanMinioInternalMetadataKeys(object.UserDefined) {
-				if HasPrefix(k, ReservedMetadataPrefix) {
+				if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
					// Do not need to send any internal metadata
					// values to client.
					continue
				}
+				// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
+				if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) {
+					continue
+				}
				content.UserMetadata[k] = v
			}
		}
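This loop applies the same two screens as the header-writing path earlier: internal reserved-prefix keys stay server-side, and per advisory GHSA-76wf-9vgp-pj7w the unencrypted content length/MD5 hints are never echoed back. A standalone sketch of the filter (the literal prefix and header names below are spelled out as assumptions in place of MinIO's constants):

package main

import (
	"fmt"
	"strings"
)

// filterUserMetadata returns only the metadata that is safe to expose to clients.
func filterUserMetadata(meta map[string]string) map[string]string {
	out := make(map[string]string, len(meta))
	for k, v := range meta {
		lk := strings.ToLower(k)
		// internal bookkeeping keys never leave the server
		if strings.HasPrefix(lk, "x-minio-internal-") {
			continue
		}
		// plaintext size/checksum of an encrypted object is not client-visible
		if lk == "x-amz-meta-x-amz-unencrypted-content-length" ||
			lk == "x-amz-meta-x-amz-unencrypted-content-md5" {
			continue
		}
		out[k] = v
	}
	return out
}

func main() {
	fmt.Println(filterUserMetadata(map[string]string{
		"X-Amz-Meta-Color":                            "blue",
		"X-Minio-Internal-Replication-Status":         "PENDING",
		"X-Amz-Meta-X-Amz-Unencrypted-Content-Length": "1048576",
	}))
	// map[X-Amz-Meta-Color:blue]
}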
@@ -560,6 +584,8 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
	data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token))
	data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken))
	data.IsTruncated = isTruncated

	commonPrefixes := make([]CommonPrefix, 0, len(prefixes))
	for _, prefix := range prefixes {
		var prefixItem = CommonPrefix{}
		prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -574,7 +600,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
func generateCopyObjectResponse(etag string, lastModified time.Time) CopyObjectResponse {
	return CopyObjectResponse{
		ETag:         "\"" + etag + "\"",
-		LastModified: lastModified.UTC().Format(timeFormatAMZLong),
+		LastModified: lastModified.UTC().Format(iso8601TimeFormat),
	}
}

@@ -582,7 +608,7 @@ func generateCopyObjectResponse(etag string, lastModified time.Time) CopyObjectR
func generateCopyObjectPartResponse(etag string, lastModified time.Time) CopyObjectPartResponse {
	return CopyObjectPartResponse{
		ETag:         "\"" + etag + "\"",
-		LastModified: lastModified.UTC().Format(timeFormatAMZLong),
+		LastModified: lastModified.UTC().Format(iso8601TimeFormat),
	}
}

@@ -601,7 +627,8 @@ func generateCompleteMultpartUploadResponse(bucket, key, location, etag string)
		Location: location,
		Bucket:   bucket,
		Key:      key,
-		ETag:     etag,
+		// AWS S3 quotes the ETag in XML, make sure we are compatible here.
+		ETag: "\"" + etag + "\"",
	}
}

@@ -626,7 +653,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
		newPart.PartNumber = part.PartNumber
		newPart.ETag = "\"" + part.ETag + "\""
		newPart.Size = part.Size
-		newPart.LastModified = part.LastModified.UTC().Format(timeFormatAMZLong)
+		newPart.LastModified = part.LastModified.UTC().Format(iso8601TimeFormat)
		listPartsResponse.Parts[index] = newPart
	}
	return listPartsResponse

@@ -656,18 +683,21 @@ func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMult
		newUpload := Upload{}
		newUpload.UploadID = upload.UploadID
		newUpload.Key = s3EncodeName(upload.Object, encodingType)
-		newUpload.Initiated = upload.Initiated.UTC().Format(timeFormatAMZLong)
+		newUpload.Initiated = upload.Initiated.UTC().Format(iso8601TimeFormat)
		listMultipartUploadsResponse.Uploads[index] = newUpload
	}
	return listMultipartUploadsResponse
}

// generate multi objects delete response.
-func generateMultiDeleteResponse(quiet bool, deletedObjects []ObjectIdentifier, errs []DeleteError) DeleteObjectsResponse {
+func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, errs []DeleteError) DeleteObjectsResponse {
	deleteResp := DeleteObjectsResponse{}
	if !quiet {
		deleteResp.DeletedObjects = deletedObjects
	}
+	if len(errs) == len(deletedObjects) {
+		deleteResp.DeletedObjects = nil
+	}
	deleteResp.Errors = errs
	return deleteResp
}
@@ -731,6 +761,10 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
		// Set retry-after header to indicate user-agents to retry request after 120secs.
		// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
		w.Header().Set(xhttp.RetryAfter, "120")
	case "InvalidRegion":
		err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalServerRegion)
	case "AuthorizationHeaderMalformed":
		err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalServerRegion)
	case "AccessDenied":
		// The request is from browser and also if browser
		// is enabled we need to redirect.
@@ -766,17 +800,6 @@ func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIE
	writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}

-// writeVersionMismatchResponse - writes custom error responses for version mismatches.
-func writeVersionMismatchResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL, isJSON bool) {
-	if isJSON {
-		// Generate error response.
-		errorResponse := getAPIErrorResponse(ctx, err, reqURL.String(), w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
-		writeResponse(w, err.HTTPStatusCode, encodeResponseJSON(errorResponse), mimeJSON)
-	} else {
-		writeResponse(w, err.HTTPStatusCode, []byte(err.Description), mimeNone)
-	}
-}
-
// writeCustomErrorResponseJSON - similar to writeErrorResponseJSON,
// but accepts the error message directly (this allows messages to be
// dynamically generated.)

@@ -796,38 +819,3 @@ func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, er
	encodedErrorResponse := encodeResponseJSON(errorResponse)
	writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}
-
-// writeCustomErrorResponseXML - similar to writeErrorResponse,
-// but accepts the error message directly (this allows messages to be
-// dynamically generated.)
-func writeCustomErrorResponseXML(ctx context.Context, w http.ResponseWriter, err APIError, errBody string, reqURL *url.URL, browser bool) {
-
-	switch err.Code {
-	case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
-		// Set retry-after header to indicate user-agents to retry request after 120secs.
-		// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
-		w.Header().Set(xhttp.RetryAfter, "120")
-	case "AccessDenied":
-		// The request is from browser and also if browser
-		// is enabled we need to redirect.
-		if browser && globalBrowserEnabled {
-			w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
-			w.WriteHeader(http.StatusTemporaryRedirect)
-			return
-		}
-	}
-
-	reqInfo := logger.GetReqInfo(ctx)
-	errorResponse := APIErrorResponse{
-		Code:       err.Code,
-		Message:    errBody,
-		Resource:   reqURL.Path,
-		BucketName: reqInfo.BucketName,
-		Key:        reqInfo.ObjectName,
-		RequestID:  w.Header().Get(xhttp.AmzRequestID),
-		HostID:     globalDeploymentID,
-	}
-
-	encodedErrorResponse := encodeResponse(errorResponse)
-	writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
-}

@@ -77,7 +77,7 @@ func TestObjectLocation(t *testing.T) {
		// Server with virtual domain name.
		{
			request: &http.Request{
-				Host:   "mys3.bucket.org",
+				Host:   "mybucket.mys3.bucket.org",
				Header: map[string][]string{},
			},
			domains: []string{"mys3.bucket.org"},

@@ -87,7 +87,7 @@ func TestObjectLocation(t *testing.T) {
		},
		{
			request: &http.Request{
-				Host: "mys3.bucket.org",
+				Host: "mybucket.mys3.bucket.org",
				Header: map[string][]string{
					"X-Forwarded-Scheme": {httpsScheme},
				},

@@ -98,11 +98,14 @@ func TestObjectLocation(t *testing.T) {
			expectedLocation: "https://mybucket.mys3.bucket.org/test/1.txt",
		},
	}
-	for i, testCase := range testCases {
-		gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
-		if testCase.expectedLocation != gotLocation {
-			t.Errorf("Test %d: expected %s, got %s", i+1, testCase.expectedLocation, gotLocation)
-		}
+	for _, testCase := range testCases {
+		testCase := testCase
+		t.Run("", func(t *testing.T) {
+			gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
+			if testCase.expectedLocation != gotLocation {
+				t.Errorf("expected %s, got %s", testCase.expectedLocation, gotLocation)
+			}
+		})
	}
}
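The rewritten loop runs each case as a subtest, and the `testCase := testCase` line is the classic pre-Go 1.22 per-iteration rebinding: a closure that outlives its iteration (a parallel subtest, a goroutine) would otherwise observe only the final element of the slice. A tiny sketch of the failure mode it guards against:

package main

import "fmt"

func main() {
	var printers []func()
	for _, v := range []string{"a", "b", "c"} {
		v := v // rebind; remove this line (pre-Go 1.22) and all three closures print "c"
		printers = append(printers, func() { fmt.Println(v) })
	}
	for _, p := range printers {
		p() // a b c
	}
}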

@@ -17,10 +17,13 @@
package cmd

import (
	"net"
	"net/http"

	"github.com/gorilla/mux"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/pkg/wildcard"
	"github.com/rs/cors"
)

func newHTTPServerFn() *xhttp.Server {
@@ -29,61 +32,75 @@ func newHTTPServerFn() *xhttp.Server {
	return globalHTTPServer
}

func newObjectLayerWithoutSafeModeFn() ObjectLayer {
	globalObjLayerMutex.Lock()
	defer globalObjLayerMutex.Unlock()
	return globalObjectAPI
}

func newObjectLayerFn() ObjectLayer {
	globalObjLayerMutex.Lock()
	defer globalObjLayerMutex.Unlock()
	if globalSafeMode {
		return nil
	}
	return globalObjectAPI
}

func newCachedObjectLayerFn() CacheObjectLayer {
	globalObjLayerMutex.Lock()
	defer globalObjLayerMutex.Unlock()

	if globalSafeMode {
		return nil
	}
	return globalCacheObjectAPI
}

func setObjectLayer(o ObjectLayer) {
	globalObjLayerMutex.Lock()
	globalObjectAPI = o
	globalObjLayerMutex.Unlock()
}

// objectAPIHandler implements and provides http handlers for S3 API.
type objectAPIHandlers struct {
	ObjectAPI func() ObjectLayer
	CacheAPI  func() CacheObjectLayer
	// Returns true if handlers should interpret encryption.
	EncryptionEnabled func() bool
	// Returns true if handlers allow SSE-KMS encryption headers.
	AllowSSEKMS func() bool
}

// getHost tries its best to return the request host.
// According to section 14.23 of RFC 2616 the Host header
// can include the port number if the default value of 80 is not used.
func getHost(r *http.Request) string {
	if r.URL.IsAbs() {
		return r.URL.Host
	}
	return r.Host
}

// registerAPIRouter - registers S3 compatible APIs.
-func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool) {
+func registerAPIRouter(router *mux.Router) {
	// Initialize API.
	api := objectAPIHandlers{
		ObjectAPI: newObjectLayerFn,
		CacheAPI:  newCachedObjectLayerFn,
		EncryptionEnabled: func() bool {
			return encryptionEnabled
		},
		AllowSSEKMS: func() bool {
			return allowSSEKMS
		},
	}

	// API Router
	apiRouter := router.PathPrefix(SlashSeparator).Subrouter()

	var routers []*mux.Router
	for _, domainName := range globalDomainNames {
-		routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
-		routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName+":{port:.*}").Subrouter())
+		if IsKubernetes() {
+			routers = append(routers, apiRouter.MatcherFunc(func(r *http.Request, match *mux.RouteMatch) bool {
+				host, _, err := net.SplitHostPort(getHost(r))
+				if err != nil {
+					host = r.Host
+				}
+				// Make sure to skip matching minio.<domain>` this is
+				// specifically meant for operator/k8s deployment
+				// The reason we need to skip this is for a special
+				// usecase where we need to make sure that
+				// minio.<namespace>.svc.<cluster_domain> is ignored
+				// by the bucketDNS style to ensure that path style
+				// is available and honored at this domain.
+				//
+				// All other `<bucket>.<namespace>.svc.<cluster_domain>`
+				// makes sure that buckets are routed through this matcher
+				// to match for `<bucket>`
+				return host != minioReservedBucket+"."+domainName
+			}).Host("{bucket:.+}."+domainName).Subrouter())
+		} else {
+			routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
+		}
	}
	routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())
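The per-domain routers let virtual-hosted-style requests (`<bucket>.<domain>`) resolve before the final path-style fallback (`/{bucket}`). A self-contained gorilla/mux sketch of that precedence (the domain and handlers are illustrative):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	router := mux.NewRouter()
	report := func(style string) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintf(w, "%s style, bucket=%s", style, mux.Vars(r)["bucket"])
		}
	}
	// virtual-hosted style: the bucket is part of the Host header
	router.Host("{bucket:.+}.example.com").PathPrefix("/").HandlerFunc(report("dns"))
	// path style fallback: the bucket is the first path segment
	router.PathPrefix("/{bucket}").HandlerFunc(report("path"))

	for _, target := range []string{
		"http://mybucket.example.com/obj", // dns style, bucket=mybucket
		"http://example.com/mybucket/obj", // path style, bucket=mybucket
	} {
		rr := httptest.NewRecorder()
		router.ServeHTTP(rr, httptest.NewRequest(http.MethodGet, target, nil))
		fmt.Println(rr.Body.String())
	}
}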

@@ -93,7 +110,10 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
	bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
		maxClients(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler))))
	// CopyObjectPart
-	bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+	bucket.Methods(http.MethodPut).Path("/{object:.+}").
+		HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
+		HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).
+		Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
	// PutObjectPart
	bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
		maxClients(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")

@@ -137,7 +157,8 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
	bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
		maxClients(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler))))
	// CopyObject
-	bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
+	bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
+		HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
	// PutObjectRetention
	bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
		maxClients(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
@@ -165,6 +186,21 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
	// GetBucketEncryption
	bucket.Methods(http.MethodGet).HandlerFunc(
		maxClients(collectAPIStats("getbucketencryption", httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")
	// GetBucketObjectLockConfig
	bucket.Methods(http.MethodGet).HandlerFunc(
		maxClients(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
	// GetBucketReplicationConfig
	bucket.Methods(http.MethodGet).HandlerFunc(
		maxClients(collectAPIStats("getbucketreplicationconfiguration", httpTraceAll(api.GetBucketReplicationConfigHandler)))).Queries("replication", "")

	// GetBucketVersioning
	bucket.Methods(http.MethodGet).HandlerFunc(
		maxClients(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
	// GetBucketNotification
	bucket.Methods(http.MethodGet).HandlerFunc(
		maxClients(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
	// ListenNotification
	bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listennotification", httpTraceAll(api.ListenNotificationHandler))).Queries("events", "{events:.*}")

	// Dummy Bucket Calls
	// GetBucketACL -- this is a dummy call.
@@ -191,10 +227,7 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
	// GetBucketLifecycleHandler - this is a dummy call.
	bucket.Methods(http.MethodGet).HandlerFunc(
		maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
-	// GetBucketReplicationHandler - this is a dummy call.
-	bucket.Methods(http.MethodGet).HandlerFunc(
-		maxClients(collectAPIStats("getbucketreplication", httpTraceAll(api.GetBucketReplicationHandler)))).Queries("replication", "")
-	// GetBucketTaggingHandler - this is a dummy call.
+	// GetBucketTaggingHandler
	bucket.Methods(http.MethodGet).HandlerFunc(
		maxClients(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
	//DeleteBucketWebsiteHandler

@@ -204,17 +237,6 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
	bucket.Methods(http.MethodDelete).HandlerFunc(
		maxClients(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")

-	// GetBucketObjectLockConfig
-	bucket.Methods(http.MethodGet).HandlerFunc(
-		maxClients(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
-	// GetBucketVersioning
-	bucket.Methods(http.MethodGet).HandlerFunc(
-		maxClients(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
-	// GetBucketNotification
-	bucket.Methods(http.MethodGet).HandlerFunc(
-		maxClients(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
-	// ListenBucketNotification
-	bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listenbucketnotification", httpTraceAll(api.ListenBucketNotificationHandler))).Queries("events", "{events:.*}")
	// ListMultipartUploads
	bucket.Methods(http.MethodGet).HandlerFunc(
		maxClients(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")

@@ -224,15 +246,20 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
	// ListObjectsV2
	bucket.Methods(http.MethodGet).HandlerFunc(
		maxClients(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
-	// ListBucketVersions
+	// ListObjectVersions
	bucket.Methods(http.MethodGet).HandlerFunc(
-		maxClients(collectAPIStats("listbucketversions", httpTraceAll(api.ListBucketObjectVersionsHandler)))).Queries("versions", "")
+		maxClients(collectAPIStats("listobjectversions", httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "")
	// ListObjectsV1 (Legacy)
	bucket.Methods(http.MethodGet).HandlerFunc(
		maxClients(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler))))
	// PutBucketLifecycle
	bucket.Methods(http.MethodPut).HandlerFunc(
		maxClients(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
	// PutBucketReplicationConfig
	bucket.Methods(http.MethodPut).HandlerFunc(
		maxClients(collectAPIStats("putbucketreplicationconfiguration", httpTraceAll(api.PutBucketReplicationConfigHandler)))).Queries("replication", "")
-	// GetObjectRetention

	// PutBucketEncryption
	bucket.Methods(http.MethodPut).HandlerFunc(
		maxClients(collectAPIStats("putbucketencryption", httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")
@@ -244,6 +271,9 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
|
||||
// PutBucketObjectLockConfig
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
|
||||
// PutBucketTaggingHandler
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbuckettagging", httpTraceAll(api.PutBucketTaggingHandler)))).Queries("tagging", "")
|
||||
// PutBucketVersioning
|
||||
bucket.Methods(http.MethodPut).HandlerFunc(
|
||||
maxClients(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
|
||||
@@ -265,6 +295,9 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
|
||||
// DeleteBucketPolicy
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
|
||||
// DeleteBucketReplication
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketreplicationconfiguration", httpTraceAll(api.DeleteBucketReplicationConfigHandler)))).Queries("replication", "")
|
||||
// DeleteBucketLifecycle
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
|
||||
@@ -274,16 +307,76 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
|
||||
// DeleteBucket
|
||||
bucket.Methods(http.MethodDelete).HandlerFunc(
|
||||
maxClients(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler))))
|
||||
// PostRestoreObject
|
||||
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
|
||||
maxClients(collectAPIStats("restoreobject", httpTraceAll(api.PostRestoreObjectHandler)))).Queries("restore", "")
|
||||
}
|
||||
|
||||
/// Root operation
|
||||
|
||||
// ListenNotification
|
||||
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
|
||||
collectAPIStats("listennotification", httpTraceAll(api.ListenNotificationHandler))).Queries("events", "{events:.*}")
|
||||
|
||||
// ListBuckets
|
||||
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
|
||||
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
|
||||
|
||||
// S3 browser with signature v4 adds '//' for ListBuckets request, so rather
|
||||
// than failing with UnknownAPIRequest we simply handle it for now.
|
||||
apiRouter.Methods(http.MethodGet).Path(SlashSeparator + SlashSeparator).HandlerFunc(
|
||||
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
|
||||
|
||||
// If none of the routes match add default error handler routes
|
||||
apiRouter.NotFoundHandler = http.HandlerFunc(collectAPIStats("notfound", httpTraceAll(errorResponseHandler)))
|
||||
apiRouter.MethodNotAllowedHandler = http.HandlerFunc(collectAPIStats("methodnotallowed", httpTraceAll(errorResponseHandler)))
|
||||
apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))
|
||||
apiRouter.MethodNotAllowedHandler = collectAPIStats("methodnotallowed", httpTraceAll(methodNotAllowedHandler("S3")))
|
||||
|
||||
}

// corsHandler handler for CORS (Cross Origin Resource Sharing)
func corsHandler(handler http.Handler) http.Handler {
    commonS3Headers := []string{
        xhttp.Date,
        xhttp.ETag,
        xhttp.ServerInfo,
        xhttp.Connection,
        xhttp.AcceptRanges,
        xhttp.ContentRange,
        xhttp.ContentEncoding,
        xhttp.ContentLength,
        xhttp.ContentType,
        xhttp.ContentDisposition,
        xhttp.LastModified,
        xhttp.ContentLanguage,
        xhttp.CacheControl,
        xhttp.RetryAfter,
        xhttp.AmzBucketRegion,
        xhttp.Expires,
        "X-Amz*",
        "x-amz*",
        "*",
    }

    return cors.New(cors.Options{
        AllowOriginFunc: func(origin string) bool {
            for _, allowedOrigin := range globalAPIConfig.getCorsAllowOrigins() {
                if wildcard.MatchSimple(allowedOrigin, origin) {
                    return true
                }
            }
            return false
        },
        AllowedMethods: []string{
            http.MethodGet,
            http.MethodPut,
            http.MethodHead,
            http.MethodPost,
            http.MethodDelete,
            http.MethodOptions,
            http.MethodPatch,
        },
        AllowedHeaders:   commonS3Headers,
        ExposedHeaders:   commonS3Headers,
        AllowCredentials: true,
    }).Handler(handler)
}
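
As a reading aid, a minimal sketch of how a wrapper like corsHandler above is typically applied to a gorilla/mux router (the standalone main wiring here is an assumption for illustration, not MinIO's actual server startup path):

package main

import (
    "net/http"

    "github.com/gorilla/mux"
    "github.com/rs/cors"
)

func main() {
    router := mux.NewRouter()
    router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("ok")) // placeholder handler
    })

    // Wrap the whole router so every route, including NotFoundHandler,
    // receives CORS headers, mirroring corsHandler above.
    c := cors.New(cors.Options{
        AllowedMethods:   []string{http.MethodGet, http.MethodPut},
        AllowCredentials: true,
    })

    http.ListenAndServe(":9000", c.Handler(router))
}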

@@ -213,15 +213,15 @@ func getClaimsFromToken(r *http.Request, token string) (map[string]interface{},

if globalPolicyOPA == nil {
    // If OPA is not set and if ldap claim key is set, allow the claim.
    if _, ok := claims.Lookup(ldapUser); ok {
    if _, ok := claims.MapClaims[ldapUser]; ok {
        return claims.Map(), nil
    }

    // If OPA is not set, session token should
    // have a policy and it's mandatory; reject
    // requests without policy claim.
    _, pokOpenID := claims.Lookup(iamPolicyClaimNameOpenID())
    _, pokSA := claims.Lookup(iamPolicyClaimNameSA())
    _, pokOpenID := claims.MapClaims[iamPolicyClaimNameOpenID()]
    _, pokSA := claims.MapClaims[iamPolicyClaimNameSA()]
    if !pokOpenID && !pokSA {
        return nil, errAuthentication
    }
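
The hunk above replaces a Lookup helper with direct indexing into the embedded JWT claims map. A self-contained sketch of the difference (the assumption here is that MapClaims behaves like a plain map[string]interface{}, as in common JWT libraries):

package main

import "fmt"

// mapClaims stands in for the JWT claims type assumed above: a plain
// map from claim name to value.
type mapClaims map[string]interface{}

func main() {
    claims := mapClaims{"ldapUser": "alice"}

    // Direct map indexing with the comma-ok idiom replaces the Lookup
    // helper: ok reports whether the claim is present at all.
    if v, ok := claims["ldapUser"]; ok {
        fmt.Println("claim present:", v)
    }
}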

@@ -333,8 +333,12 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
    // Populate payload again to handle it in HTTP handler.
    r.Body = ioutil.NopCloser(bytes.NewReader(payload))
}
if cred.AccessKey != "" {
    logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
}

if cred.AccessKey == "" {
if action != policy.ListAllMyBucketsAction && cred.AccessKey == "" {
    // Anonymous checks are not meant for ListBuckets action
    if globalPolicySys.IsAllowed(policy.Args{
        AccountName: cred.AccessKey,
        Action:      action,
@@ -346,8 +350,26 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
        // Request is allowed, return the appropriate access key.
        return cred.AccessKey, owner, ErrNone
    }

    if action == policy.ListBucketVersionsAction {
        // In AWS S3 the s3:ListBucket permission is the same as the s3:ListBucketVersions permission;
        // verify as a fallback.
        if globalPolicySys.IsAllowed(policy.Args{
            AccountName:     cred.AccessKey,
            Action:          policy.ListBucketAction,
            BucketName:      bucketName,
            ConditionValues: getConditionValues(r, locationConstraint, "", nil),
            IsOwner:         false,
            ObjectName:      objectName,
        }) {
            // Request is allowed, return the appropriate access key.
            return cred.AccessKey, owner, ErrNone
        }
    }

    return cred.AccessKey, owner, ErrAccessDenied
}

if globalIAMSys.IsAllowed(iampolicy.Args{
    AccountName: cred.AccessKey,
    Action:      iampolicy.Action(action),
@@ -360,6 +382,24 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
    // Request is allowed, return the appropriate access key.
    return cred.AccessKey, owner, ErrNone
}

if action == policy.ListBucketVersionsAction {
    // In AWS S3 the s3:ListBucket permission is the same as the s3:ListBucketVersions permission;
    // verify as a fallback.
    if globalIAMSys.IsAllowed(iampolicy.Args{
        AccountName:     cred.AccessKey,
        Action:          iampolicy.ListBucketAction,
        BucketName:      bucketName,
        ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
        ObjectName:      objectName,
        IsOwner:         owner,
        Claims:          claims,
    }) {
        // Request is allowed, return the appropriate access key.
        return cred.AccessKey, owner, ErrNone
    }
}

return cred.AccessKey, owner, ErrAccessDenied
}

@@ -423,7 +463,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
if err != nil {
    return toAPIErrorCode(ctx, err)
}
r.Body = ioutil.NopCloser(reader)
r.Body = reader
return ErrNone
}

@@ -519,7 +559,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
if retMode == objectlock.RetGovernance && byPassSet {
    byPassSet = globalPolicySys.IsAllowed(policy.Args{
        AccountName: cred.AccessKey,
        Action:      policy.Action(policy.BypassGovernanceRetentionAction),
        Action:      policy.BypassGovernanceRetentionAction,
        BucketName:  bucketName,
        ConditionValues: conditions,
        IsOwner:     false,
@@ -528,7 +568,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
}
if globalPolicySys.IsAllowed(policy.Args{
    AccountName: cred.AccessKey,
    Action:      policy.Action(policy.PutObjectRetentionAction),
    Action:      policy.PutObjectRetentionAction,
    BucketName:  bucketName,
    ConditionValues: conditions,
    IsOwner:     false,
@@ -551,7 +591,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
if retMode == objectlock.RetGovernance && byPassSet {
    byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
        AccountName: cred.AccessKey,
        Action:      policy.BypassGovernanceRetentionAction,
        Action:      iampolicy.BypassGovernanceRetentionAction,
        BucketName:  bucketName,
        ObjectName:  objectName,
        ConditionValues: conditions,
@@ -561,7 +601,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
}
if globalIAMSys.IsAllowed(iampolicy.Args{
    AccountName: cred.AccessKey,
    Action:      policy.PutObjectRetentionAction,
    Action:      iampolicy.PutObjectRetentionAction,
    BucketName:  bucketName,
    ConditionValues: conditions,
    ObjectName:  objectName,
@@ -579,7 +619,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
// isPutActionAllowed - check if PUT operation is allowed on the resource, this
// call verifies bucket policies and IAM policies, supports multi user
// checks etc.
func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.Request, action iampolicy.Action) (s3Err APIErrorCode) {
func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action iampolicy.Action) (s3Err APIErrorCode) {
var cred auth.Credentials
var owner bool
switch atype {
@@ -600,6 +640,10 @@ func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.R
    return s3Err
}

if cred.AccessKey != "" {
    logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
}

// Do not check for PutObjectRetentionAction permission,
// if mode and retain until date are not set.
// Can happen when bucket has default lock config set

@@ -52,7 +52,7 @@ func TestGetRequestAuthType(t *testing.T) {
    "X-Amz-Content-Sha256": []string{streamingContentSHA256},
    "Content-Encoding":     []string{streamingContentEncoding},
},
Method: "PUT",
Method: http.MethodPut,
},
authT: authTypeStreamingSigned,
},
@@ -111,7 +111,7 @@ func TestGetRequestAuthType(t *testing.T) {
Header: http.Header{
    "Content-Type": []string{"multipart/form-data"},
},
Method: "POST",
Method: http.MethodPost,
},
authT: authTypePostPolicy,
},
@@ -212,7 +212,7 @@ func TestIsRequestPresignedSignatureV2(t *testing.T) {
for i, testCase := range testCases {
    // creating an input HTTP request.
    // Only the query parameters are relevant for this particular test.
    inputReq, err := http.NewRequest("GET", "http://example.com", nil)
    inputReq, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
    if err != nil {
        t.Fatalf("Error initializing input HTTP request: %v", err)
    }
@@ -246,7 +246,7 @@ func TestIsRequestPresignedSignatureV4(t *testing.T) {
for i, testCase := range testCases {
    // creating an input HTTP request.
    // Only the query parameters are relevant for this particular test.
    inputReq, err := http.NewRequest("GET", "http://example.com", nil)
    inputReq, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
    if err != nil {
        t.Fatalf("Error initializing input HTTP request: %v", err)
    }
@@ -369,15 +369,15 @@ func TestIsReqAuthenticated(t *testing.T) {
    s3Error APIErrorCode
}{
// When request is unsigned, access denied is returned.
{mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
{mustNewRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
// Empty Content-Md5 header.
{mustNewSignedEmptyMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
{mustNewSignedEmptyMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
// Short Content-Md5 header.
{mustNewSignedShortMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
{mustNewSignedShortMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
// When request is properly signed, but has bad Content-MD5 header.
{mustNewSignedBadMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
{mustNewSignedBadMD5Request(http.MethodPut, "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
// When request is properly signed, error is none.
{mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
{mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrNone},
}

ctx := context.Background()
@@ -413,11 +413,11 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
    Request *http.Request
    ErrCode APIErrorCode
}{
{Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
{Request: mustNewSignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
{Request: mustNewSignedV2Request(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedV2Request(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
}
ctx := context.Background()
for i, testCase := range testCases {
@@ -461,7 +461,7 @@ func TestValidateAdminSignature(t *testing.T) {
}

for i, testCase := range testCases {
    req := mustNewRequest("GET", "http://localhost:9000/", 0, nil, t)
    req := mustNewRequest(http.MethodGet, "http://localhost:9000/", 0, nil, t)
    if err := signRequestV4(req, testCase.AccessKey, testCase.SecretKey); err != nil {
        t.Fatalf("Unable to initialize new signed http request %s", err)
    }

@@ -18,7 +18,6 @@ package cmd

import (
    "context"
    "path"
    "time"

    "github.com/minio/minio/cmd/logger"
@@ -30,8 +29,10 @@ import (
// path: 'bucket/' or '/bucket/' => Heal bucket
// path: 'bucket/object' => Heal object
type healTask struct {
    path string
    opts madmin.HealOpts
    bucket    string
    object    string
    versionID string
    opts      madmin.HealOpts
    // Healing response will be sent here
    responseCh chan healResult
}
@@ -53,15 +54,28 @@ func (h *healRoutine) queueHealTask(task healTask) {
    h.tasks <- task
}

func waitForLowHTTPReq(tolerance int32) {
func waitForLowHTTPReq(tolerance int, maxWait time.Duration) {
    // At most 10 attempts to wait with a 100 millisecond interval before proceeding
    waitCount := 10
    waitTick := 100 * time.Millisecond

    // Bucket notification and http trace are not costly, it is okay to ignore them
    // while counting the number of concurrent connections
    toleranceFn := func() int {
        return tolerance + globalHTTPListen.NumSubscribers() + globalHTTPTrace.NumSubscribers()
    }

    if httpServer := newHTTPServerFn(); httpServer != nil {
        // Wait at most 10 minutes for an in-progress request before proceeding to heal
        waitCount := 600
        // Any requests in progress, delay the heal.
        for (httpServer.GetRequestCount() >= tolerance) &&
            waitCount > 0 {
        for httpServer.GetRequestCount() >= toleranceFn() {
            time.Sleep(waitTick)
            waitCount--
            time.Sleep(1 * time.Second)
            if waitCount == 0 {
                if intDataUpdateTracker.debug {
                    logger.Info("waitForLowHTTPReq: waited %d times, resuming", waitCount)
                }
                break
            }
        }
    }
}
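
The old and new bodies of waitForLowHTTPReq are interleaved above. As a reading aid, a minimal self-contained sketch of the pattern the new version converges on, a busy-wait with a hard cut-off (the surrounding scaffolding here is assumed; only the tolerance-and-deadline shape is taken from the diff):

package main

import "time"

// waitLoop sketches the heal back-off: keep sleeping while the server is
// busier than the tolerance allows, but never wait longer than maxWait
// before proceeding with the heal anyway.
func waitLoop(requestCount func() int, tolerance int, maxWait time.Duration) {
    const tick = 100 * time.Millisecond
    deadline := time.Now().Add(maxWait)
    for requestCount() >= tolerance && time.Now().Before(deadline) {
        time.Sleep(tick)
    }
}

func main() {
    busy := 3
    waitLoop(func() int { return busy }, 5, time.Second) // returns at once: 3 < 5
}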

@@ -75,24 +89,20 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
    break
}

// Wait and proceed if there are active requests
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()))

var res madmin.HealResultItem
var err error
bucket, object := path2BucketObject(task.path)
switch {
case bucket == "" && object == "":
case task.bucket == nopHeal:
    continue
case task.bucket == SlashSeparator:
    res, err = healDiskFormat(ctx, objAPI, task.opts)
case bucket != "" && object == "":
    res, err = objAPI.HealBucket(ctx, bucket, task.opts.DryRun, task.opts.Remove)
case bucket != "" && object != "":
    res, err = objAPI.HealObject(ctx, bucket, object, task.opts)
}
ObjectPathUpdated(path.Join(bucket, object))
if task.responseCh != nil {
    task.responseCh <- healResult{result: res, err: err}
case task.bucket != "" && task.object == "":
    res, err = objAPI.HealBucket(ctx, task.bucket, task.opts.DryRun, task.opts.Remove)
case task.bucket != "" && task.object != "":
    res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts)
}
task.responseCh <- healResult{result: res, err: err}

case <-h.doneCh:
    return
case <-ctx.Done():
@@ -101,7 +111,7 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
    }
}

func initHealRoutine() *healRoutine {
func newHealRoutine() *healRoutine {
    return &healRoutine{
        tasks:  make(chan healTask),
        doneCh: make(chan struct{}),
@@ -109,23 +119,6 @@ func initHealRoutine() *healRoutine {

}

func startBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
    // Run the background healer
    globalBackgroundHealRoutine = initHealRoutine()
    go globalBackgroundHealRoutine.run(ctx, objAPI)

    // Launch the background healer sequence to track
    // background healing operations
    info := objAPI.StorageInfo(ctx, false)
    numDisks := info.Backend.OnlineDisks.Sum() + info.Backend.OfflineDisks.Sum()
    nh := newBgHealSequence(numDisks)
    globalBackgroundHealState.LaunchNewHealSequence(nh)
}

func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
    go startBackgroundHealing(ctx, objAPI)
}

// healDiskFormat - heals format.json, return value indicates if a
// failure error occurred.
func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpts) (madmin.HealResultItem, error) {
@@ -137,16 +130,5 @@ func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpt
    return madmin.HealResultItem{}, err
}

// Healing succeeded, notify the peers to reload format and re-initialize disks.
// We will not notify peers if healing is not required.
if err == nil {
    for _, nerr := range globalNotificationSys.ReloadFormat(opts.DryRun) {
        if nerr.Err != nil {
            logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
            logger.LogIf(ctx, nerr.Err)
        }
    }
}

return res, nil
}

@@ -18,26 +18,34 @@ package cmd

import (
    "context"
    "errors"
    "fmt"
    "time"

    "github.com/dustin/go-humanize"
    "github.com/minio/minio/cmd/logger"
)

const defaultMonitorNewDiskInterval = time.Minute * 10
const (
    defaultMonitorNewDiskInterval = time.Second * 10
    healingTrackerFilename        = ".healing.bin"
)

func initLocalDisksAutoHeal(ctx context.Context, objAPI ObjectLayer) {
    go monitorLocalDisksAndHeal(ctx, objAPI)
//go:generate msgp -file $GOFILE -unexported
type healingTracker struct {
    ID string

    // future: add more tracking capabilities
}

// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible to perform the heal
func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
    z, ok := objAPI.(*xlZones)
func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
    z, ok := objAPI.(*erasureServerPools)
    if !ok {
        return
    }

    initBackgroundHealing(ctx, objAPI) // start quick background healing

    var bgSeq *healSequence
    var found bool

@@ -49,68 +57,160 @@ func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
    time.Sleep(time.Second)
}

globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)

if drivesToHeal := globalBackgroundHealState.healDriveCount(); drivesToHeal > 0 {
    logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content...",
        drivesToHeal, defaultMonitorNewDiskInterval))

    // Heal any disk format and metadata early, if possible.
    // Start with format healing
    if err := bgSeq.healDiskFormat(); err != nil {
        if newObjectLayerFn() != nil {
            // log only in situations when the object layer
            // has fully initialized.
            logger.LogIf(bgSeq.ctx, err)
        }
    }
}

if err := bgSeq.healDiskMeta(objAPI); err != nil {
    if newObjectLayerFn() != nil {
        // log only in situations when the object layer
        // has fully initialized.
        logger.LogIf(bgSeq.ctx, err)
    }
}

go monitorLocalDisksAndHeal(ctx, z, bgSeq)
}

func getLocalDisksToHeal() (disksToHeal Endpoints) {
    for _, ep := range globalEndpoints {
        for _, endpoint := range ep.Endpoints {
            if !endpoint.IsLocal {
                continue
            }
            // Try to connect to the current endpoint
            // and reformat if the current disk is not formatted
            disk, _, err := connectEndpoint(endpoint)
            if errors.Is(err, errUnformattedDisk) {
                disksToHeal = append(disksToHeal, endpoint)
            } else if err == nil && disk != nil && disk.Healing() {
                disksToHeal = append(disksToHeal, disk.Endpoint())
            }
        }
    }
    return disksToHeal

}
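
getLocalDisksToHeal is called once at startup (see initAutoHeal above) and its result is pushed into the global heal state. A hedged, self-contained sketch of that push/pop bookkeeping (the set-like state below is an assumption for illustration; MinIO's actual allHealState carries more fields):

package main

import (
    "fmt"
    "sync"
)

// healState sketches the bookkeeping assumed above: a mutex-guarded set of
// endpoint strings still waiting to be healed.
type healState struct {
    mu    sync.Mutex
    disks map[string]struct{}
}

func (s *healState) pushHealLocalDisks(endpoints ...string) {
    s.mu.Lock()
    defer s.mu.Unlock()
    for _, ep := range endpoints {
        s.disks[ep] = struct{}{}
    }
}

func (s *healState) popHealLocalDisks(ep string) {
    s.mu.Lock()
    defer s.mu.Unlock()
    delete(s.disks, ep)
}

func main() {
    st := &healState{disks: map[string]struct{}{}}
    st.pushHealLocalDisks("/mnt/disk1", "/mnt/disk2")
    st.popHealLocalDisks("/mnt/disk1") // healed successfully
    fmt.Println(len(st.disks))         // 1 disk still pending
}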

func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
    // Run the background healer
    globalBackgroundHealRoutine = newHealRoutine()
    go globalBackgroundHealRoutine.run(ctx, objAPI)

    globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence(), objAPI)
}

// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible to perform the heal
func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq *healSequence) {
    // Perform automatic disk healing when a disk is replaced locally.
wait:
    for {
        select {
        case <-ctx.Done():
            return
        case <-time.After(defaultMonitorNewDiskInterval):
            // Attempt a heal as the server starts up first.
            localDisksInZoneHeal := make([]Endpoints, len(z.zones))
            for i, ep := range globalEndpoints {
                localDisksToHeal := Endpoints{}
                for _, endpoint := range ep.Endpoints {
                    if !endpoint.IsLocal {
                        continue
                    }
                    // Try to connect to the current endpoint
                    // and reformat if the current disk is not formatted
                    _, _, err := connectEndpoint(endpoint)
                    if err == errUnformattedDisk {
                        localDisksToHeal = append(localDisksToHeal, endpoint)
                    }
            var erasureSetInZoneDisksToHeal []map[int][]StorageAPI

            healDisks := globalBackgroundHealState.getHealLocalDisks()
            if len(healDisks) > 0 {
                // Reformat disks
                bgSeq.sourceCh <- healSource{bucket: SlashSeparator}

                // Ensure that reformatting disks is finished
                bgSeq.sourceCh <- healSource{bucket: nopHeal}

                logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
                    len(healDisks)))

                erasureSetInZoneDisksToHeal = make([]map[int][]StorageAPI, len(z.serverPools))
                for i := range z.serverPools {
                    erasureSetInZoneDisksToHeal[i] = map[int][]StorageAPI{}
                }
                if len(localDisksToHeal) == 0 {
            }

            // heal only if new disks found.
            for _, endpoint := range healDisks {
                disk, format, err := connectEndpoint(endpoint)
                if err != nil {
                    printEndpointError(endpoint, err, true)
                    continue
                }
                localDisksInZoneHeal[i] = localDisksToHeal
            }

            // Reformat disks
            bgSeq.sourceCh <- healSource{path: SlashSeparator}

            // Ensure that reformatting disks is finished
            bgSeq.sourceCh <- healSource{path: nopHeal}

            var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
            // Compute the list of erasure set to heal
            for i, localDisksToHeal := range localDisksInZoneHeal {
                var erasureSetToHeal []int
                for _, endpoint := range localDisksToHeal {
                    // Load the new format of this passed endpoint
                    _, format, err := connectEndpoint(endpoint)
                    if err != nil {
                        logger.LogIf(ctx, err)
                        continue
                    }
                    // Calculate the set index where the current endpoint belongs
                    setIndex, _, err := findDiskIndex(z.zones[i].format, format)
                    if err != nil {
                        logger.LogIf(ctx, err)
                        continue
                    }

                    erasureSetToHeal = append(erasureSetToHeal, setIndex)
                zoneIdx := globalEndpoints.GetLocalZoneIdx(disk.Endpoint())
                if zoneIdx < 0 {
                    continue
                }
                erasureSetInZoneToHeal[i] = erasureSetToHeal

                // Calculate the set index where the current endpoint belongs
                z.serverPools[zoneIdx].erasureDisksMu.RLock()
                // Protect reading reference format.
                setIndex, _, err := findDiskIndex(z.serverPools[zoneIdx].format, format)
                z.serverPools[zoneIdx].erasureDisksMu.RUnlock()
                if err != nil {
                    printEndpointError(endpoint, err, false)
                    continue
                }

                erasureSetInZoneDisksToHeal[zoneIdx][setIndex] = append(erasureSetInZoneDisksToHeal[zoneIdx][setIndex], disk)
            }

            // Heal all erasure sets that need healing
            for i, erasureSetToHeal := range erasureSetInZoneToHeal {
                for _, setIndex := range erasureSetToHeal {
                    err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex])
                    if err != nil {
                        logger.LogIf(ctx, err)
            buckets, _ := z.ListBucketsHeal(ctx)
            for i, setMap := range erasureSetInZoneDisksToHeal {
                for setIndex, disks := range setMap {
                    for _, disk := range disks {
                        logger.Info("Healing disk '%s' on %s zone", disk, humanize.Ordinal(i+1))

                        // So someone changed the drives underneath, healing tracker missing.
                        if !disk.Healing() {
                            logger.Info("Healing tracker missing on '%s', disk was swapped again on %s zone", disk, humanize.Ordinal(i+1))
                            diskID, err := disk.GetDiskID()
                            if err != nil {
                                logger.LogIf(ctx, err)
                                // reading format.json failed or not found, proceed to look
                                // for new disks to be healed again, we cannot proceed further.
                                goto wait
                            }

                            if err := saveHealingTracker(disk, diskID); err != nil {
                                logger.LogIf(ctx, err)
                                // Unable to write healing tracker, permission denied or some
                                // other unexpected error occurred. Proceed to look for new
                                // disks to be healed again, we cannot proceed further.
                                goto wait
                            }
                        }

                        lbDisks := z.serverPools[i].sets[setIndex].getOnlineDisks()
                        if err := healErasureSet(ctx, setIndex, buckets, lbDisks); err != nil {
                            logger.LogIf(ctx, err)
                            continue
                        }

                        logger.Info("Healing disk '%s' on %s zone complete", disk, humanize.Ordinal(i+1))

                        if err := disk.Delete(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix),
                            healingTrackerFilename, false); err != nil && !errors.Is(err, errFileNotFound) {
                            logger.LogIf(ctx, err)
                            continue
                        }

                        // Only upon success pop the healed disk.
                        globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
                    }
                }
            }
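
saveHealingTracker is referenced above but its body is not part of this diff. A hedged sketch of what such a helper plausibly does, given the healingTracker type and healingTrackerFilename constant introduced earlier (the single-method disk interface below is an assumption; MinIO's real StorageAPI is much larger):

package main

import (
    "fmt"
    "path"
)

// diskWriter is the one capability this sketch needs: persist a small blob
// at volume/filePath (an assumption, not MinIO's full StorageAPI).
type diskWriter interface {
    WriteAll(volume, filePath string, data []byte) error
}

// saveHealingTrackerSketch persists a heal-in-progress marker so that a
// restart can tell a half-healed disk apart from a freshly swapped one.
func saveHealingTrackerSketch(disk diskWriter, diskID string) error {
    // The real tracker is msgp-encoded; raw bytes keep the sketch small.
    return disk.WriteAll(".minio.sys", path.Join("buckets", ".healing.bin"), []byte(diskID))
}

type memDisk map[string][]byte

func (m memDisk) WriteAll(volume, filePath string, data []byte) error {
    m[path.Join(volume, filePath)] = data
    return nil
}

func main() {
    m := memDisk{}
    if err := saveHealingTrackerSketch(m, "disk-uuid-1234"); err != nil {
        fmt.Println("save failed:", err)
    }
    fmt.Println(len(m)) // 1 tracker written
}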

cmd/background-newdisks-heal-ops_gen.go (new file, 110 lines)
@@ -0,0 +1,110 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
    "github.com/tinylib/msgp/msgp"
)

// DecodeMsg implements msgp.Decodable
func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
    var field []byte
    _ = field
    var zb0001 uint32
    zb0001, err = dc.ReadMapHeader()
    if err != nil {
        err = msgp.WrapError(err)
        return
    }
    for zb0001 > 0 {
        zb0001--
        field, err = dc.ReadMapKeyPtr()
        if err != nil {
            err = msgp.WrapError(err)
            return
        }
        switch msgp.UnsafeString(field) {
        case "ID":
            z.ID, err = dc.ReadString()
            if err != nil {
                err = msgp.WrapError(err, "ID")
                return
            }
        default:
            err = dc.Skip()
            if err != nil {
                err = msgp.WrapError(err)
                return
            }
        }
    }
    return
}

// EncodeMsg implements msgp.Encodable
func (z healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
    // map header, size 1
    // write "ID"
    err = en.Append(0x81, 0xa2, 0x49, 0x44)
    if err != nil {
        return
    }
    err = en.WriteString(z.ID)
    if err != nil {
        err = msgp.WrapError(err, "ID")
        return
    }
    return
}

// MarshalMsg implements msgp.Marshaler
func (z healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
    o = msgp.Require(b, z.Msgsize())
    // map header, size 1
    // string "ID"
    o = append(o, 0x81, 0xa2, 0x49, 0x44)
    o = msgp.AppendString(o, z.ID)
    return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
    var field []byte
    _ = field
    var zb0001 uint32
    zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
    if err != nil {
        err = msgp.WrapError(err)
        return
    }
    for zb0001 > 0 {
        zb0001--
        field, bts, err = msgp.ReadMapKeyZC(bts)
        if err != nil {
            err = msgp.WrapError(err)
            return
        }
        switch msgp.UnsafeString(field) {
        case "ID":
            z.ID, bts, err = msgp.ReadStringBytes(bts)
            if err != nil {
                err = msgp.WrapError(err, "ID")
                return
            }
        default:
            bts, err = msgp.Skip(bts)
            if err != nil {
                err = msgp.WrapError(err)
                return
            }
        }
    }
    o = bts
    return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z healingTracker) Msgsize() (s int) {
    s = 1 + 3 + msgp.StringPrefixSize + len(z.ID)
    return
}

cmd/background-newdisks-heal-ops_gen_test.go (new file, 123 lines)
@@ -0,0 +1,123 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
    "bytes"
    "testing"

    "github.com/tinylib/msgp/msgp"
)

func TestMarshalUnmarshalhealingTracker(t *testing.T) {
    v := healingTracker{}
    bts, err := v.MarshalMsg(nil)
    if err != nil {
        t.Fatal(err)
    }
    left, err := v.UnmarshalMsg(bts)
    if err != nil {
        t.Fatal(err)
    }
    if len(left) > 0 {
        t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
    }

    left, err = msgp.Skip(bts)
    if err != nil {
        t.Fatal(err)
    }
    if len(left) > 0 {
        t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
    }
}

func BenchmarkMarshalMsghealingTracker(b *testing.B) {
    v := healingTracker{}
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        v.MarshalMsg(nil)
    }
}

func BenchmarkAppendMsghealingTracker(b *testing.B) {
    v := healingTracker{}
    bts := make([]byte, 0, v.Msgsize())
    bts, _ = v.MarshalMsg(bts[0:0])
    b.SetBytes(int64(len(bts)))
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        bts, _ = v.MarshalMsg(bts[0:0])
    }
}

func BenchmarkUnmarshalhealingTracker(b *testing.B) {
    v := healingTracker{}
    bts, _ := v.MarshalMsg(nil)
    b.ReportAllocs()
    b.SetBytes(int64(len(bts)))
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _, err := v.UnmarshalMsg(bts)
        if err != nil {
            b.Fatal(err)
        }
    }
}

func TestEncodeDecodehealingTracker(t *testing.T) {
    v := healingTracker{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)

    m := v.Msgsize()
    if buf.Len() > m {
        t.Log("WARNING: TestEncodeDecodehealingTracker Msgsize() is inaccurate")
    }

    vn := healingTracker{}
    err := msgp.Decode(&buf, &vn)
    if err != nil {
        t.Error(err)
    }

    buf.Reset()
    msgp.Encode(&buf, &v)
    err = msgp.NewReader(&buf).Skip()
    if err != nil {
        t.Error(err)
    }
}

func BenchmarkEncodehealingTracker(b *testing.B) {
    v := healingTracker{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)
    b.SetBytes(int64(buf.Len()))
    en := msgp.NewWriter(msgp.Nowhere)
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        v.EncodeMsg(en)
    }
    en.Flush()
}

func BenchmarkDecodehealingTracker(b *testing.B) {
    v := healingTracker{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)
    b.SetBytes(int64(buf.Len()))
    rd := msgp.NewEndlessReader(buf.Bytes(), b)
    dc := msgp.NewReader(rd)
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        err := v.DecodeMsg(dc)
        if err != nil {
            b.Fatal(err)
        }
    }
}

@@ -35,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
    b.Fatal(err)
}
@@ -76,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
object := getRandomObjectName()

// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
    b.Fatal(err)
}
@@ -127,9 +127,9 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
b.StopTimer()
}

// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -143,9 +143,9 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
runPutObjectPartBenchmark(b, objLayer, objSize)
}

// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -159,9 +159,9 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
runPutObjectBenchmark(b, objLayer, objSize)
}

// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -181,7 +181,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
    b.Fatal(err)
}
@@ -190,7 +190,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {

// generate etag for the generated data.
// etag of the data to be written is required as input for PutObject.
// PutObject is the function which writes the data onto the FS/XL backend.
// PutObject is the function which writes the data onto the FS/Erasure backend.

// get text data generated for number of bytes equal to object size.
md5hex := getMD5Hash(textData)
@@ -240,9 +240,9 @@ func generateBytesData(size int) []byte {
return bytes.Repeat(getRandomByte(), size)
}

// creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
// creates Erasure/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -256,9 +256,9 @@ func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
runGetObjectBenchmark(b, objLayer, objSize)
}

// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -278,7 +278,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
    b.Fatal(err)
}
@@ -322,7 +322,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
    b.Fatal(err)
}
@@ -331,7 +331,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for PutObject.
// PutObject is the function which writes the data onto the FS/XL backend.
// PutObject is the function which writes the data onto the FS/Erasure backend.

md5hex := getMD5Hash([]byte(textData))
sha256hex := ""

@@ -18,6 +18,7 @@ package cmd

import (
    "bytes"
    "context"
    "encoding/hex"
    "fmt"
    "hash"
@@ -80,7 +81,7 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
    bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
    totalFileSize = bitrotSumsTotalSize + length
}
err := disk.CreateFile(volume, filePath, totalFileSize, r)
err := disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r)
r.CloseWithError(err)
close(bw.canClose)
}()
@@ -118,7 +119,7 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
// For the first ReadAt() call we need to open the stream for reading.
b.currOffset = offset
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
b.rc, err = b.disk.ReadFileStream(b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
if err != nil {
    return 0, err
}
@@ -139,8 +140,8 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
b.h.Write(buf)

if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
    err := &errHashMismatch{fmt.Sprintf("hashes do not match expected %s, got %s",
        hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
    err := &errHashMismatch{fmt.Sprintf("Disk: %s -> %s/%s - content hash does not match - expected %s, got %s",
        b.disk, b.volume, b.filePath, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
    logger.LogIf(GlobalContext, err)
    return 0, err
}

@@ -17,6 +17,8 @@
package cmd

import (
    "context"
    "fmt"
    "hash"
    "io"

@@ -33,14 +35,14 @@ type wholeBitrotWriter struct {
}

func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
err := b.disk.AppendFile(b.volume, b.filePath, p)
err := b.disk.AppendFile(context.TODO(), b.volume, b.filePath, p)
if err != nil {
    logger.LogIf(GlobalContext, err)
    logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
    return 0, err
}
_, err = b.Hash.Write(p)
if err != nil {
    logger.LogIf(GlobalContext, err)
    logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
    return 0, err
}
return len(p), nil
@@ -68,15 +70,13 @@ type wholeBitrotReader struct {
func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error) {
if b.buf == nil {
    b.buf = make([]byte, b.tillOffset-offset)
    if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
        ctx := GlobalContext
        logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
        logger.LogIf(ctx, err)
    if _, err := b.disk.ReadFile(context.TODO(), b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
        logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, err))
        return 0, err
    }
}
if len(b.buf) < len(buf) {
    logger.LogIf(GlobalContext, errLessData)
    logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, errLessData))
    return 0, errLessData
}
n = copy(buf, b.buf)

@@ -30,25 +30,6 @@ import (
// magic HH-256 key as HH-256 hash of the first 100 decimals of π as utf-8 string with a zero key.
var magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0")

// BitrotAlgorithm specifies an algorithm used for bitrot protection.
type BitrotAlgorithm uint

const (
    // SHA256 represents the SHA-256 hash function
    SHA256 BitrotAlgorithm = 1 + iota
    // HighwayHash256 represents the HighwayHash-256 hash function
    HighwayHash256
    // HighwayHash256S represents the Streaming HighwayHash-256 hash function
    HighwayHash256S
    // BLAKE2b512 represents the BLAKE2b-512 hash function
    BLAKE2b512
)

// DefaultBitrotAlgorithm is the default algorithm used for bitrot protection.
const (
    DefaultBitrotAlgorithm = HighwayHash256S
)

var bitrotAlgorithms = map[BitrotAlgorithm]string{
    SHA256:     "sha256",
    BLAKE2b512: "blake2b",
|
||||
@@ -17,6 +17,7 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
@@ -34,12 +35,12 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
|
||||
volume := "testvol"
|
||||
filePath := "testfile"
|
||||
|
||||
disk, err := newPosix(tmpDir)
|
||||
disk, err := newLocalXLStorage(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
disk.MakeVol(volume)
|
||||
disk.MakeVol(context.Background(), volume)
|
||||
|
||||
writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
|
||||
|
||||
|
||||
@@ -18,18 +18,16 @@ package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio-go/v6/pkg/set"
|
||||
"github.com/minio/minio-go/v7/pkg/set"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/cmd/rest"
|
||||
@@ -43,6 +41,7 @@ const (
|
||||
)
|
||||
|
||||
const (
|
||||
bootstrapRESTMethodHealth = "/health"
|
||||
bootstrapRESTMethodVerify = "/verify"
|
||||
)
|
||||
|
||||
@@ -53,7 +52,7 @@ type bootstrapRESTServer struct{}
|
||||
type ServerSystemConfig struct {
|
||||
MinioPlatform string
|
||||
MinioRuntime string
|
||||
MinioEndpoints EndpointZones
|
||||
MinioEndpoints EndpointServerPools
|
||||
}
|
||||
|
||||
// Diff - returns error on first difference found in two configs.
|
||||
@@ -94,6 +93,9 @@ func getServerSystemCfg() ServerSystemConfig {
|
||||
}
|
||||
}
|
||||
|
||||
// HealthHandler returns success if request is valid
|
||||
func (b *bootstrapRESTServer) HealthHandler(w http.ResponseWriter, r *http.Request) {}
|
||||
|
||||
func (b *bootstrapRESTServer) VerifyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "VerifyHandler")
|
||||
cfg := getServerSystemCfg()
|
||||
@@ -106,6 +108,9 @@ func registerBootstrapRESTHandlers(router *mux.Router) {
|
||||
server := &bootstrapRESTServer{}
|
||||
subrouter := router.PathPrefix(bootstrapRESTPrefix).Subrouter()
|
||||
|
||||
subrouter.Methods(http.MethodPost).Path(bootstrapRESTVersionPrefix + bootstrapRESTMethodHealth).HandlerFunc(
|
||||
httpTraceHdrs(server.HealthHandler))
|
||||
|
||||
subrouter.Methods(http.MethodPost).Path(bootstrapRESTVersionPrefix + bootstrapRESTMethodVerify).HandlerFunc(
|
||||
httpTraceHdrs(server.VerifyHandler))
|
||||
}
|
||||
@@ -114,42 +119,21 @@ func registerBootstrapRESTHandlers(router *mux.Router) {
|
||||
type bootstrapRESTClient struct {
|
||||
endpoint Endpoint
|
||||
restClient *rest.Client
|
||||
connected int32
|
||||
}
|
||||
|
||||
// Reconnect to a bootstrap rest server.k
|
||||
func (client *bootstrapRESTClient) reConnect() {
|
||||
atomic.StoreInt32(&client.connected, 1)
|
||||
}
|
||||
|
||||
// Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected
|
||||
// permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()
|
||||
// after verifying format.json
|
||||
func (client *bootstrapRESTClient) call(method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
|
||||
return client.callWithContext(GlobalContext, method, values, body, length)
|
||||
}
|
||||
|
||||
// Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected
|
||||
// permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()
|
||||
// after verifying format.json
|
||||
func (client *bootstrapRESTClient) callWithContext(ctx context.Context, method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
|
||||
if !client.IsOnline() {
|
||||
client.reConnect()
|
||||
}
|
||||
|
||||
if values == nil {
|
||||
values = make(url.Values)
|
||||
}
|
||||
|
||||
respBody, err = client.restClient.CallWithContext(ctx, method, values, body, length)
|
||||
respBody, err = client.restClient.Call(ctx, method, values, body, length)
|
||||
if err == nil {
|
||||
return respBody, nil
|
||||
}
|
||||
|
||||
if isNetworkError(err) {
|
||||
atomic.StoreInt32(&client.connected, 0)
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -158,24 +142,12 @@ func (client *bootstrapRESTClient) String() string {
|
||||
return client.endpoint.String()
|
||||
}
|
||||
|
||||
// IsOnline - returns whether RPC client failed to connect or not.
|
||||
func (client *bootstrapRESTClient) IsOnline() bool {
|
||||
return atomic.LoadInt32(&client.connected) == 1
|
||||
}
|
||||
|
||||
// Close - marks the client as closed.
|
||||
func (client *bootstrapRESTClient) Close() error {
|
||||
atomic.StoreInt32(&client.connected, 0)
|
||||
client.restClient.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Verify - fetches system server config.
|
||||
func (client *bootstrapRESTClient) Verify(srcCfg ServerSystemConfig) (err error) {
|
||||
func (client *bootstrapRESTClient) Verify(ctx context.Context, srcCfg ServerSystemConfig) (err error) {
|
||||
if newObjectLayerFn() != nil {
|
||||
return nil
|
||||
}
|
||||
respBody, err := client.call(bootstrapRESTMethodVerify, nil, nil, -1)
|
||||
respBody, err := client.callWithContext(ctx, bootstrapRESTMethodVerify, nil, nil, -1)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -187,31 +159,47 @@ func (client *bootstrapRESTClient) Verify(srcCfg ServerSystemConfig) (err error)
|
||||
return srcCfg.Diff(recvCfg)
|
||||
}
|
||||
|
||||
func verifyServerSystemConfig(endpointZones EndpointZones) error {
|
||||
func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointServerPools) error {
|
||||
srcCfg := getServerSystemCfg()
|
||||
clnts := newBootstrapRESTClients(endpointZones)
|
||||
clnts := newBootstrapRESTClients(endpointServerPools)
|
||||
var onlineServers int
|
||||
var offlineEndpoints []string
|
||||
var retries int
|
||||
for onlineServers < len(clnts)/2 {
|
||||
for _, clnt := range clnts {
|
||||
if err := clnt.Verify(srcCfg); err != nil {
|
||||
if err := clnt.Verify(ctx, srcCfg); err != nil {
|
||||
if isNetworkError(err) {
|
||||
offlineEndpoints = append(offlineEndpoints, clnt.String())
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("%s as has incorrect configuration: %w", clnt.String(), err)
|
||||
}
|
||||
onlineServers++
|
||||
}
|
||||
// Sleep for a while - so that we don't go into
|
||||
// 100% CPU when half the endpoints are offline.
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
// Sleep for a while - so that we don't go into
|
||||
// 100% CPU when half the endpoints are offline.
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
retries++
|
||||
// after 5 retries start logging that servers are not reachable yet
|
||||
if retries >= 5 {
|
||||
logger.Info(fmt.Sprintf("Waiting for atleast %d remote servers to be online for bootstrap check", len(clnts)/2))
|
||||
logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
|
||||
retries = 0 // reset to log again after 5 retries.
|
||||
}
|
||||
offlineEndpoints = nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
-func newBootstrapRESTClients(endpointZones EndpointZones) []*bootstrapRESTClient {
+func newBootstrapRESTClients(endpointServerPools EndpointServerPools) []*bootstrapRESTClient {
 	seenHosts := set.NewStringSet()
 	var clnts []*bootstrapRESTClient
-	for _, ep := range endpointZones {
+	for _, ep := range endpointServerPools {
 		for _, endpoint := range ep.Endpoints {
 			if seenHosts.Contains(endpoint.Host) {
 				continue
@@ -220,11 +208,7 @@ func newBootstrapRESTClients(endpointZones EndpointZones) []*bootstrapRESTClient

 		// Only proceed for remote endpoints.
 		if !endpoint.IsLocal {
-			clnt, err := newBootstrapRESTClient(endpoint)
-			if err != nil {
-				continue
-			}
-			clnts = append(clnts, clnt)
+			clnts = append(clnts, newBootstrapRESTClient(endpoint))
 		}
 	}
 }
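The `seenHosts` set above ensures only one bootstrap client is created per remote host, even when the same host appears in several pools. The same dedup idiom with only the standard library (values are illustrative):

```go
package main

import "fmt"

func main() {
	endpoints := []string{"node1:9000", "node2:9000", "node1:9000"}
	seen := make(map[string]struct{})
	var hosts []string
	for _, h := range endpoints {
		if _, ok := seen[h]; ok {
			continue // host already has a client
		}
		seen[h] = struct{}{}
		hosts = append(hosts, h)
	}
	fmt.Println(hosts) // [node1:9000 node2:9000]
}
```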
@@ -232,26 +216,15 @@ func newBootstrapRESTClients(endpointZones EndpointZones) []*bootstrapRESTClient
 }

 // Returns a new bootstrap client.
-func newBootstrapRESTClient(endpoint Endpoint) (*bootstrapRESTClient, error) {
+func newBootstrapRESTClient(endpoint Endpoint) *bootstrapRESTClient {
 	serverURL := &url.URL{
 		Scheme: endpoint.Scheme,
 		Host:   endpoint.Host,
 		Path:   bootstrapRESTPath,
 	}

-	var tlsConfig *tls.Config
-	if globalIsSSL {
-		tlsConfig = &tls.Config{
-			ServerName: endpoint.Hostname(),
-			RootCAs:    globalRootCAs,
-		}
-	}
-
-	trFn := newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)
-	restClient, err := rest.NewClient(serverURL, trFn, newAuthToken)
-	if err != nil {
-		return nil, err
-	}
-
-	return &bootstrapRESTClient{endpoint: endpoint, restClient: restClient, connected: 1}, nil
+	restClient := rest.NewClient(serverURL, globalInternodeTransport, newAuthToken)
+	restClient.HealthCheckFn = nil
+
+	return &bootstrapRESTClient{endpoint: endpoint, restClient: restClient}
 }

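With the per-client TLS setup folded into the process-wide `globalInternodeTransport`, the constructor has no remaining failure path, so the `(client, error)` return shrinks to a bare pointer and callers lose their unreachable error-handling branches. A minimal sketch of the same shape (the types and names here are hypothetical):

```go
package sketch

import "net/url"

type peerClient struct{ u *url.URL }

// newPeerClient cannot fail once the transport is shared process-wide,
// so it returns the client directly instead of (client, error).
func newPeerClient(scheme, host, path string) *peerClient {
	return &peerClient{u: &url.URL{Scheme: scheme, Host: host, Path: path}}
}
```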
@@ -18,13 +18,12 @@ package cmd

 import (
+	"encoding/xml"
+	"fmt"
 	"io"
 	"net/http"

 	"github.com/gorilla/mux"
-	xhttp "github.com/minio/minio/cmd/http"
-	"github.com/minio/minio/cmd/logger"
-	bucketsse "github.com/minio/minio/pkg/bucket/encryption"
 	"github.com/minio/minio/pkg/bucket/policy"
 )

@@ -46,15 +45,14 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
 		return
 	}

-	vars := mux.Vars(r)
-	bucket := vars["bucket"]
-
-	// PutBucketEncryption API requires Content-Md5
-	if _, ok := r.Header[xhttp.ContentMD5]; !ok {
-		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
+	if !objAPI.IsEncryptionSupported() {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
 		return
 	}

+	vars := mux.Vars(r)
+	bucket := vars["bucket"]
+
 	if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketEncryptionAction, bucket, ""); s3Error != ErrNone {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
 		return
@@ -69,7 +67,12 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
 	// Parse bucket encryption xml
 	encConfig, err := validateBucketSSEConfig(io.LimitReader(r.Body, maxBucketSSEConfigSize))
 	if err != nil {
-		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
+		apiErr := APIError{
+			Code:           "MalformedXML",
+			Description:    fmt.Sprintf("%s (%s)", errorCodes[ErrMalformedXML].Description, err),
+			HTTPStatusCode: errorCodes[ErrMalformedXML].HTTPStatusCode,
+		}
+		writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
 		return
 	}

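The hunk above keeps the standard S3 error code but appends the concrete parse failure to the generic description, so clients can see why their XML was rejected. A self-contained sketch of the pattern (the `apiError` type here is illustrative, not MinIO's definition):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// apiError mirrors the APIError shape used in the hunk above.
type apiError struct {
	Code           string
	Description    string
	HTTPStatusCode int
}

func main() {
	parseErr := errors.New("XML syntax error on line 3")
	e := apiError{
		Code: "MalformedXML",
		// Append the concrete cause to the generic S3 description.
		Description:    fmt.Sprintf("%s (%s)", "The XML you provided was not well-formed", parseErr),
		HTTPStatusCode: http.StatusBadRequest,
	}
	fmt.Printf("%+v\n", e)
}
```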
@@ -79,17 +82,17 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
 		return
 	}

-	// Store the bucket encryption configuration in the object layer
-	if err = objAPI.SetBucketSSEConfig(ctx, bucket, encConfig); err != nil {
+	configData, err := xml.Marshal(encConfig)
+	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
 	}

-	// Update the in-memory bucket encryption cache
-	globalBucketSSEConfigSys.Set(bucket, *encConfig)
-
-	// Update peer MinIO servers of the updated bucket encryption config
-	globalNotificationSys.SetBucketSSEConfig(ctx, bucket, encConfig)
+	// Store the bucket encryption configuration in the object layer
+	if err = globalBucketMetadataSys.Update(bucket, bucketSSEConfig, configData); err != nil {
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+		return
+	}

 	writeSuccessResponseHeadersOnly(w)
 }
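This is the recurring refactor in these handler hunks: instead of an object-layer write plus a cache update plus a peer broadcast, the handler serializes the config once and hands it to `globalBucketMetadataSys`. A sketch of the resulting write path; `encConfig`, `bucketSSEConfig`, and `globalBucketMetadataSys` are names from this diff, while the wrapper itself is hypothetical and assumes (as the diff suggests) that the metadata subsystem handles persistence and propagation behind `Update`:

```go
// Hypothetical helper mirroring the new write path above.
func saveSSEConfig(bucket string, encConfig *bucketsse.BucketSSEConfig) error {
	configData, err := xml.Marshal(encConfig)
	if err != nil {
		return err
	}
	// One call replaces: objAPI.SetBucketSSEConfig + cache Set + peer notify.
	return globalBucketMetadataSys.Update(bucket, bucketSSEConfig, configData)
}
```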
@@ -122,21 +125,20 @@ func (api objectAPIHandlers) GetBucketEncryptionHandler(w http.ResponseWriter, r
 		return
 	}

-	// Fetch bucket encryption configuration from object layer
-	var encConfig *bucketsse.BucketSSEConfig
-	if encConfig, err = objAPI.GetBucketSSEConfig(ctx, bucket); err != nil {
+	config, err := globalBucketMetadataSys.GetSSEConfig(bucket)
+	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
 	}

-	var encConfigData []byte
-	if encConfigData, err = xml.Marshal(encConfig); err != nil {
+	configData, err := xml.Marshal(config)
+	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
 	}

 	// Write bucket encryption configuration to client
-	writeSuccessResponseXML(w, encConfigData)
+	writeSuccessResponseXML(w, configData)
 }

 // DeleteBucketEncryptionHandler - Removes bucket encryption configuration
@@ -167,15 +169,10 @@ func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter
 	}

 	// Delete bucket encryption config from object layer
-	if err = objAPI.DeleteBucketSSEConfig(ctx, bucket); err != nil {
+	if err = globalBucketMetadataSys.Update(bucket, bucketSSEConfig, nil); err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
 	}

-	// Remove entry from the in-memory bucket encryption cache
-	globalBucketSSEConfigSys.Remove(bucket)
-	// Update peer MinIO servers of the updated bucket encryption config
-	globalNotificationSys.RemoveBucketSSEConfig(ctx, bucket)
-
 	writeSuccessNoContent(w)
 }

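Deletion follows the same unified path: passing nil config data to `Update` removes the stored `bucket-encryption.xml`, so create, replace, and delete all share one code path. An illustrative counterpart to the save sketch above, again reusing the diff's identifiers:

```go
// Hypothetical helper: a nil payload tells the metadata subsystem to
// drop the config entry for this bucket.
func removeSSEConfig(bucket string) error {
	return globalBucketMetadataSys.Update(bucket, bucketSSEConfig, nil)
}
```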
@@ -17,142 +17,32 @@
 package cmd

 import (
-	"bytes"
-	"context"
-	"encoding/xml"
 	"errors"
 	"io"
-	"path"
-	"sync"

 	bucketsse "github.com/minio/minio/pkg/bucket/encryption"
 )

 // BucketSSEConfigSys - in-memory cache of bucket encryption config
-type BucketSSEConfigSys struct {
-	sync.RWMutex
-	bucketSSEConfigMap map[string]bucketsse.BucketSSEConfig
-}
+type BucketSSEConfigSys struct{}

 // NewBucketSSEConfigSys - Creates an empty in-memory bucket encryption configuration cache
 func NewBucketSSEConfigSys() *BucketSSEConfigSys {
-	return &BucketSSEConfigSys{
-		bucketSSEConfigMap: make(map[string]bucketsse.BucketSSEConfig),
-	}
-}
-
-// load - Loads the bucket encryption configuration for the given list of buckets
-func (sys *BucketSSEConfigSys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
-	for _, bucket := range buckets {
-		config, err := objAPI.GetBucketSSEConfig(GlobalContext, bucket.Name)
-		if err != nil {
-			if _, ok := err.(BucketSSEConfigNotFound); ok {
-				sys.Remove(bucket.Name)
-			}
-			continue
-		}
-		sys.Set(bucket.Name, *config)
-	}
-
-	return nil
-}
-
-// Init - Initializes in-memory bucket encryption config cache for the given list of buckets
-func (sys *BucketSSEConfigSys) Init(buckets []BucketInfo, objAPI ObjectLayer) error {
-	if objAPI == nil {
-		return errServerNotInitialized
-	}
-
-	// We don't cache bucket encryption config in gateway mode, nothing to do.
-	if globalIsGateway {
-		return nil
-	}
-
-	// Load bucket encryption config cache once during boot.
-	return sys.load(buckets, objAPI)
+	return &BucketSSEConfigSys{}
 }

 // Get - gets bucket encryption config for the given bucket.
-func (sys *BucketSSEConfigSys) Get(bucket string) (config bucketsse.BucketSSEConfig, ok bool) {
-	// We don't cache bucket encryption config in gateway mode.
+func (sys *BucketSSEConfigSys) Get(bucket string) (*bucketsse.BucketSSEConfig, error) {
 	if globalIsGateway {
-		objAPI := newObjectLayerWithoutSafeModeFn()
+		objAPI := newObjectLayerFn()
 		if objAPI == nil {
-			return
+			return nil, errServerNotInitialized
 		}

-		cfg, err := objAPI.GetBucketSSEConfig(GlobalContext, bucket)
-		if err != nil {
-			return
-		}
-		return *cfg, true
+		return nil, BucketSSEConfigNotFound{Bucket: bucket}
 	}

-	sys.Lock()
-	defer sys.Unlock()
-	config, ok = sys.bucketSSEConfigMap[bucket]
-	return
-}
-
-// Set - sets bucket encryption config to given bucket name.
-func (sys *BucketSSEConfigSys) Set(bucket string, config bucketsse.BucketSSEConfig) {
-	// We don't cache bucket encryption config in gateway mode.
-	if globalIsGateway {
-		return
-	}
-
-	sys.Lock()
-	defer sys.Unlock()
-	sys.bucketSSEConfigMap[bucket] = config
-}
-
-// Remove - removes bucket encryption config for given bucket.
-func (sys *BucketSSEConfigSys) Remove(bucket string) {
-	sys.Lock()
-	defer sys.Unlock()
-
-	delete(sys.bucketSSEConfigMap, bucket)
-}
-
-// saveBucketSSEConfig - save bucket encryption config for given bucket.
-func saveBucketSSEConfig(ctx context.Context, objAPI ObjectLayer, bucket string, config *bucketsse.BucketSSEConfig) error {
-	data, err := xml.Marshal(config)
-	if err != nil {
-		return err
-	}
-
-	// Path to store bucket encryption config for the given bucket.
-	configFile := path.Join(bucketConfigPrefix, bucket, bucketSSEConfig)
-	return saveConfig(ctx, objAPI, configFile, data)
-}
-
-// getBucketSSEConfig - get bucket encryption config for given bucket.
-func getBucketSSEConfig(objAPI ObjectLayer, bucket string) (*bucketsse.BucketSSEConfig, error) {
-	// Path to bucket-encryption.xml for the given bucket.
-	configFile := path.Join(bucketConfigPrefix, bucket, bucketSSEConfig)
-	configData, err := readConfig(GlobalContext, objAPI, configFile)
-	if err != nil {
-		if err == errConfigNotFound {
-			err = BucketSSEConfigNotFound{Bucket: bucket}
-		}
-		return nil, err
-	}
-
-	return bucketsse.ParseBucketSSEConfig(bytes.NewReader(configData))
-}
-
-// removeBucketSSEConfig - removes bucket encryption config for given bucket.
-func removeBucketSSEConfig(ctx context.Context, objAPI ObjectLayer, bucket string) error {
-	// Path to bucket-encryption.xml for the given bucket.
-	configFile := path.Join(bucketConfigPrefix, bucket, bucketSSEConfig)
-
-	if err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile); err != nil {
-		if _, ok := err.(ObjectNotFound); ok {
-			return BucketSSEConfigNotFound{Bucket: bucket}
-		}
-		return err
-	}
-	return nil
+	return globalBucketMetadataSys.GetSSEConfig(bucket)
 }

 // validateBucketSSEConfig parses bucket encryption configuration and validates if it is supported by MinIO.
@@ -165,5 +55,6 @@ func validateBucketSSEConfig(r io.Reader) (*bucketsse.BucketSSEConfig, error) {
 	if len(encConfig.Rules) == 1 && encConfig.Rules[0].DefaultEncryptionAction.Algorithm == bucketsse.AES256 {
 		return encConfig, nil
 	}
+
 	return nil, errors.New("Unsupported bucket encryption configuration")
 }

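For reference, the only configuration `validateBucketSSEConfig` accepts after this change is a single SSE-S3 (AES256) default-encryption rule. A self-contained sketch of a request body that would pass that check, parsed the way the handler's validator would see it (the struct tags below are assumptions based on the standard S3 schema, not copied from `bucketsse`):

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Minimal mirror of the S3 bucket-encryption schema.
type sseRule struct {
	Algorithm string `xml:"ApplyServerSideEncryptionByDefault>SSEAlgorithm"`
}

type sseConfig struct {
	XMLName xml.Name  `xml:"ServerSideEncryptionConfiguration"`
	Rules   []sseRule `xml:"Rule"`
}

func main() {
	body := `<ServerSideEncryptionConfiguration>
  <Rule>
    <ApplyServerSideEncryptionByDefault>
      <SSEAlgorithm>AES256</SSEAlgorithm>
    </ApplyServerSideEncryptionByDefault>
  </Rule>
</ServerSideEncryptionConfiguration>`

	var cfg sseConfig
	if err := xml.Unmarshal([]byte(body), &cfg); err != nil {
		panic(err)
	}
	// Accept exactly one rule whose default algorithm is AES256,
	// mirroring the check at the end of validateBucketSSEConfig.
	ok := len(cfg.Rules) == 1 && cfg.Rules[0].Algorithm == "AES256"
	fmt.Println("supported:", ok) // supported: true
}
```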
File diff suppressed because it is too large.
Some files were not shown because too many files have changed in this diff.