Compare commits: RELEASE.20... to RELEASE.20...
666 commits
Commits (SHA1):

0e502899a8 424b44c247 01a71c366d 990fbeb3a4 5a9a898ba2 fe1fbe0005
c56a139fdc df50eda811 f5d3313210 97fcc9ff99 8a6b2b4447 757eaeae92
b7dd61f6bc d2a95a04a4 0cc993f403 3a64580663 d087e28dce 96adfaebe1
ddf84f8257 507f993075 73a6a60785 cf4cf58faf 6bc3c74c0c 598ce1e354
4685b76e08 78c9109f6c 7e248fc0ba c526fa9119 520e0fd985 54a7eba358
1494ba2e6e a5b3548ede 8318aa0113 e69c42956b 53ca589c11 ca8ff8718e
e8e48e4c4a 67e17ed3f8 2a6a40e93b eda34423d7 7ce1f6e736 5c53620a72
5f94cec1e2 646350fa7f e162a055cc 28d3ad3ada 0bd44a7764 8be6d887e2
66b14a0d32 153a612253 1a1b55e133 879de20edf e77ad3f9bb 4ce86ff5fa
e290c010e6 33d267fa1b 601a744159 f630d7c3fa 91bfefcf8c a1b01e6d5f
48594617b5 b35b9dcff7 a3e317773a 16431d222c ee49a23220 a9eef521ec
901d33b59c 255116fde7 00ebea2536 dedf9774c7 6b1c62133d d4251b2545
b9d1698d74 7c696e1cb6 165d60421d c7962118f8 892a204013 0e6aedc7ed
c547a4d835 fc9668baa5 ba17d46f15 54a4f93854 bdd816488d 36dcfee2f7
4d13ddf6b3 9e25475475 e955aa7f2a 81d2b54dfd 7956ff0313 9ff25fb64b
04df69f633 908eb57795 ecfae074dc be5d394e56 849a27ee61 062f3ea43a
5cfedcfe33 4d2fc530d0 3970204009 f046f557fa 401958938d 566cffe53d
028bc2f9be 813d9bc316 79ba458051 cf220be9b5 c433572585 a42b576382
fb9b53026d 2ac54e5a7b 8eecdc6d1f 50577e2bd2 7bc1f986e8 d796621ccc
751e9fb7be f6113264f4 a3534a730b 7f8b8a0e43 bd6f7b6d83 b0a4beb66a
472c2d828c 01ee49045e 7bd9f821dd b20ecc7b54 61eb9d4e29 43eb5a001c
f58692abb7 c1760fb764 e9bc0e7e98 ffcadcd99e 7a733a8d54 ce97313fda
7b81967a3c ff811f594b 0bf80b3c89 ae3b369fe1 77b15e7194 20537f974e
4476a64bdf d4b701576e 721c053712 e3071157f0 c07af89e48 9c846106fa
cf94d1f1f1 6187440f35 57b7c3494f dda18c28c5 f8d6eaaa96 47d4fabb58
80039f60d5 5a5e9b8a89 b7ed3b77bd 75b925c326 91d419ee6c 23345098ea
7ce91ea1a1 41079f1015 712dfa40cd decfd6108c b890bbfa63 0e3a570b85
7060c809c0 9dbfd84c5b fce380a044 46ba15ab03 1e39ca39c3 80ef1ae51c
4d0715d226 8a274169da 21d8298fe1 5d6f6d8d5b bacf6156c1 1d1b213f1f
1f11af42f1 a026c8748f 9f7d89b3cd 92a77cc78e b0c84e3de7 bbc914e174
3fca4055d2 66afa16aed 9b0a8de7de 04bbede17d 0e3bafcc54 b48f719b8e
289fcbd08c f6875bb893 7e803adf13 5b5deee5b3 7dae4cb685 27fad98179
58f7e3a829 4a15bd8ff8 b030ef1aca cc46a99f97 becec6cb6b bc33db9fc0
7d4579e737 b7c90751b0 88fd1cba71 e43cc316ff e3f24a29fa 890e526bde
16ce455fca 29b7164468 acdd03f609 03b35ecdd0 5307e18085 2cea944cdb
c08540c7b7 3934700a08 0913eb6655 1bfbe354f5 2d78e20120 77210513c9
2e6f8bdf19 25144fedd5 27f64dd9a4 9d7648f02f 1ef8babfef 4ea7bf0510
5dcf1d13a9 94d37d05e5 0cbdc458c5 c1437c7b46 f357f65d04 ef8e952fc4
a2bc383e15 23930355a7 bb9f41e613 bc110d8055 b23b19e5c3 65b1a4282e
1dbb3f6f43 af3dc25dfe 5a0c0079a1 af8f563ed3 93af4a4864 28f188e3ef
b29224f62f d756da41b9 cdab4a3b85 b88c57ba93 1a5496eced b264e6a191
ae1b495262 16939ca192 60cd513a33 27d94c64ed 21a0f857d3 03a6e8aee2
d0862ddf86 4afbb89774 5ec57a9533 f088e8960b 27dec42ad6 b70053090c
f10e2254ae 1f92fc3fc0 f71b114a84 e3e0532613 2c0f121550 6f41cff75a
9b39616c1b fad3d66093 ff99ef74c8 6990e73b11 860a1237ab 97b5bf1fb7
ed3418c046 a2230868e0 71bab74148 661ea57907 1f18efb0ba 8ae46bce93
f19a414e09 0ee2933234 9890f579f8 2ee337ead5 362e14fa1a 524fe62594
3c87e1e60d 0cac868a36 2480c66857 186c477f3c 570670be8c 22b7226581
f16f715b59 75adb787c4 6123377e66 778cccb15d 0256dae657 88a93838de
0855988427 84b121bbe1 48fb7b0dd7 01e550a9be 63a2e0bab6 24657859a8
67d07e895c d7df6bc738 a4e1de93a7 41be557f0c 9417fd933e 067d21d0f2
3882da6ac5 77b780b8ca 127e8bf3b6 74faed166a dbd05d6e82 b5d35c7e09
c39eb3bacd 57fad9148c 0f88cdc80e e2a9949b16 38e3c7a8f7 67f166fa02
c7df5fb119 a4be47d7ad aaea94a48d c3d9c45f58 a2a48cc065 cf407f7176
d6dd17a483 a66071099c 9a6e569412 7dfa565d00 d2e5f01542 f4e373e0d2
c8691db2b7 affe51cb19 57118919d2 7db05a80dd a8ba71edef 45a99c3fd3
58e6b83e95 f556a72fe2 cd7a5cab8a 67b5e0dbe8 b68f0cbde4 ebc3627c73
295730408b 5a9f133491 f30afa4956 171cedf0f0 27d8ef14f8 f6d13f57bb
5f36167f1a 8fb4ae916c 48da4aeee0 07df9eecda 7f214a0e46 e1a0a1e73c
1278b0ec73 3e9bd931ed 9d588319dd 0012ca8ca5 288e276abe f22e745514
070c31eac5 1a56ebea70 70e1cbda21 1ede3967c1 60f2df54e0 ba708f51f2
b106b1c131 0df31f63ab 64d4da5a37 7aec38a73e a2fd8caa69 f546636c52
38ccc4f672 04e669a6be cc3f139d1f d50442da01 404b05a44c 3d7c1ad31d
d300e775a6 7ee2d1c339 54a98773f8 737a3f0bad 3bd9636a5b 76b21de0c6
dabb058167 f394313fee b7c5e45fff 0a224654c2 47e4a36d7e e420a1de4d
62dc0f7698 2d31d92271 1981fe2072 f68bd37acf 76877eb6fa 3d66d053c7
0d3ae3810f 0e31cff762 c27110e37d 89441a22aa 557135185c 4d39fd4165
f4c03e56b8 d2b6aa9033 5dd40b9377 001b77e7e1 9d91d32d82 a60ac7ca17
42ba0da6b0 f527c708f2 6f474982ed fd6cd52728 c9e49f4366 46fd9f4a53
4da641533d 79df2c7ce7 3e28af1723 866a95de38 6aa0574a53 bb97eafa82
51d5efee1b c980804514 b883803b21 7e3a7d7044 9ad6012782 5a96cbbeaa
416977436e 54ec0a1308 ebd78e983f 1cf726348f 41f75e6d1b 0e3037631f
6fbf4f96b6 e35709a99e f3602d7d08 526e10a2e0 0b21734571 499872f31d
5cc16e098c 364e27d5f2 1f4e0bd17c 0557e18472 cfd66ab8c3 691b763613
3ddb501190 997e808088 13441ad0f8 aa508591c1 818f0201fc 890f43ffa5
e270ab65b3 926373f9c1 111c6177d2 91f72f25ab da540ccf8c d6396f82fe
4fa250a6a1 b42cfcea60 aca6dfbd60 5f7e6d03ff 88ad742da0 a8d4042853
44a9339c0a 113c7ff49a 40dbe243d9 d422d24278 109c927dad de400f3473
8144a125ce 3e34e41a5a 2270887d43 c6431f9a04 88c0d0120c 44fefe5b9f
878d368cea f2bd026d0e 518612492c 81e43b87c2 a02e17f15c c76f86fdbd
5b7c00ff52 b9f0046ee7 3b79f7e4ae 85d2df02b9 2f1e8ba612 84c690cb07
239bbad7ab 4be8023408 83e8da57b8 dcff6c996d 0a66a6f1e5 e82a5c5c54
92fdcafb66 b9aae1aaae 7d70afc937 12b63061c2 038fdeea83 0b6225bcc3
f286ef8e17 b120bcb60a be34fc9134 8591d17d82 f6190d6751 f0fc77fded
f56cac6381 4f35054d29 d29df6714a 7460fb8349 a7c430355a 1df1517449
f7c357ebad 20c60aae68 b14527b7af f840080e5b 2c6983a2f1 acfb83ec5e
3db931dc0e 8309ddd486 21c868a646 ffe9acfe4a 24d904d194 b280a37c4d
906548d0ba 1485a5bf3b 9ec197f2e8 d21466f595 4f3290309e d6fe0f61a9
5a22f2cf0b 42d11d9e7d e49c184595 99d87c5ca2 4c0f48c548 4ce6d35e30
81bf0c66c6 34dc725d26 a5db4ca092 61029fe20b fee3f88cb5 932500e43d
55d4cdd464 fe3e47b1e8 9ca25bd48f 91e0823ff0 142c6b11b3 ef0b8367b5
d1bfb4d2c0 3b5d6f003f 26c457860b d0515031c7 08f4a0a816 28f95f1fbe
c791de0e1e 36b5426f6e 1e72e9b1cd 9739e55d0f 1cddbc80cf 3da9ee15d3
1e2fac054c 914bfb2d9c 40244994ad 556ae07857 17fd71164c 81d19156e9
7b82411e6f fb268add7a 7700973538 54e25a0251 79b3a1fe4e faf013ec84
7152915318 886262e58a 9c5d9ae376 3d2bc15e9a 20c43c447d 4caed7cc0d
5b68f8ea6a 8378bc9958 367cb48096 661b263e77 07c5e72cdb 7752cdbfaf
4545ecad58 ac74237f01 5a36179c19 82d73f387d e8c6314770 087c1b98dc
68c5ad83fb 5acc8c0134 c897b6a82d d008e90d50 ea820b30bf 03725dc015
0a6f9bc1eb 1946922de3 edf1f4233b f4b55ea7a7 8dfd1f03e9 acf26c5ab7
d9800c8135 02bef7560f 07dd0692b6 4f3317effe 9afdbe3648 fe0df01448
12e6907512 5aef492b4c 5d7ed8ff7d b1754fc5ff 19bbf3e142 e1755275a0
520037e721 cbb0828ab8 8774d10bdf df9f479d58 8bb52c9c2a 947c423824
c3d24fb26d 112f9ae087 01b9ff54d9 64a1904136 bce6864785 ecd54b4cba
ca2b288a4b 1016fbb8f9 be3f81c7ec 9f3c151c3c 9735f3d8f2 34680c5ccf
58934e5881 ad3f98b8e7 7c33a33ef3 3dfcca68e6 73b74c94a1 18338d60d5
e106070640 091a7ae359 70160aeab3 1aa08f594d 14d8a931fe 30ba85bc67
caadcc3ed8 26f55472c6 79a58e275c 900e584514 bb639d9f29 15dcacc1fc
6d53e3c2d7 8ed7346273 3c1220adca 4ed0eb7012 2af5445309 abb1916bda
9424dca9e4 db84bb9bd3 c603f85488 2f1ee25f50 7bdf9005e5 d9c1d79e30
bd88b86919 8e29ae8c44 d158607f8e 939fbb3c38 3b9dfa9d29 0c76fb57f2
.github/ISSUE_TEMPLATE.md (vendored): 5 changes

@@ -7,6 +7,11 @@ assignees: ''

 ---

+## NOTE
+
+All GitHub issues are addressed on a best-effort basis at MinIO's sole discretion. There are no Service Level Agreements (SLA) or Objectives (SLO). Remember our [Code of Conduct](https://github.com/minio/minio/blob/master/code_of_conduct.md) when engaging with MinIO Engineers and the larger community.
+
+For urgent issues (e.g. production down, etc.), subscribe to [SUBNET](https://min.io/pricing?jmp=github) for direct to engineering support.
+
 <!--- Provide a general summary of the issue in the Title above -->

 ## Expected Behavior
.github/ISSUE_TEMPLATE/bug_report.md (vendored): 3 changes

@@ -7,6 +7,9 @@ assignees: ''

 ---

+## NOTE
+If this case is urgent, please subscribe to [Subnet](https://min.io/pricing) so that our 24/7 support team may help you faster.
+
 <!--- Provide a general summary of the issue in the Title above -->

 ## Expected Behavior
.github/lock.yml (vendored, deleted): 39 deletions

@@ -1,39 +0,0 @@
-# Configuration for Lock Threads - https://github.com/dessant/lock-threads-app
-
-# Number of days of inactivity before a closed issue or pull request is locked
-daysUntilLock: 365
-
-# Skip issues and pull requests created before a given timestamp. Timestamp must
-# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
-skipCreatedBefore: false
-
-# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
-exemptLabels: []
-
-# Label to add before locking, such as `outdated`. Set to `false` to disable
-lockLabel: true
-
-# Comment to post before locking. Set to `false` to disable
-lockComment: >-
-  This thread has been automatically locked since there has not been
-  any recent activity after it was closed. Please open a new issue for
-  related bugs.
-
-# Assign `resolved` as the reason for locking. Set to `false` to disable
-setLockReason: true
-
-# Limit to only `issues` or `pulls`
-only: issues
-
-# Optionally, specify configuration settings just for `issues` or `pulls`
-# issues:
-#   exemptLabels:
-#     - help-wanted
-#   lockLabel: outdated
-
-# pulls:
-#   daysUntilLock: 30
-
-# Repository to extend settings from
-# _extends: repo
.github/markdown-lint-cfg.yaml (vendored, new file): 5 additions

@@ -0,0 +1,5 @@
+# Config file for markdownlint-cli
+MD033:
+  allowed_elements:
+    - details
+    - summary
.github/stale.yml (vendored): 2 changes

@@ -14,7 +14,7 @@ onlyLabels: []
 exemptLabels:
   - "security"
   - "pending discussion"
-  - "do not close"
+  - "do-not-close"

 # Set to true to ignore issues in a project (defaults to false)
 exemptProjects: false
.github/workflows/go-cross.yml (vendored): 23 changes

@@ -1,26 +1,33 @@
-name: Go
+name: Crosscompile

 on:
   pull_request:
     branches:
       - master

+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
 jobs:
   build:
-    name: MinIO crosscompile tests on ${{ matrix.go-version }} and ${{ matrix.os }}
+    name: Build Tests with Go ${{ matrix.go-version }} on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.16.x, 1.17.x]
+        go-version: [1.17.x, 1.18.x]
         os: [ubuntu-latest]
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-node@v1
-        with:
-          node-version: '12'
-      - uses: actions/setup-go@v2
+      - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
+      - uses: actions/setup-go@bfdd3570ce990073878bf10f6b2d79082de49492 # v2
        with:
          go-version: ${{ matrix.go-version }}
+          check-latest: true
       - name: Build on ${{ matrix.os }}
         if: matrix.os == 'ubuntu-latest'
         env:
.github/workflows/go-healing.yml (vendored, new file): 50 additions

@@ -0,0 +1,50 @@
+name: Healing Functional Tests
+
+on:
+  pull_request:
+    branches:
+      - master
+
+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
+jobs:
+  build:
+    name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        go-version: [1.17.x]
+        os: [ubuntu-latest]
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-go@v3
+        with:
+          go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - uses: actions/cache@v2
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-${{ matrix.go-version }}-go-
+      - name: Build on ${{ matrix.os }}
+        if: matrix.os == 'ubuntu-latest'
+        env:
+          CGO_ENABLED: 0
+          GO111MODULE: on
+          MINIO_KMS_SECRET_KEY: "my-minio-key:oyArl7zlPECEduNbB1KXgdzDn2Bdpvvw0l8VO51HQnY="
+          MINIO_KMS_AUTO_ENCRYPTION: on
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make verify-healing
+          make verify-healing-inconsistent-versions
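The `MINIO_KMS_SECRET_KEY` value used by this new workflow follows MinIO's single-key KMS format, `<key-name>:<base64 of 32 random bytes>`. A minimal sketch of generating a fresh value for local runs (the key name `my-minio-key` simply mirrors the one above):

```sh
# Generate 32 random bytes and print them in MINIO_KMS_SECRET_KEY format.
echo "my-minio-key:$(head -c 32 /dev/urandom | base64)"
```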
.github/workflows/go-lint.yml (vendored): 37 changes

@@ -1,23 +1,51 @@
-name: Go
+name: Linters and Tests

 on:
   pull_request:
     branches:
       - master

+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
 jobs:
   build:
-    name: MinIO tests on ${{ matrix.go-version }} and ${{ matrix.os }}
+    name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.16.x, 1.17.x]
+        go-version: [1.17.x, 1.18.x]
         os: [ubuntu-latest, windows-latest]
     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
           go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - uses: actions/cache@v2
+        if: matrix.os == 'ubuntu-latest'
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-${{ matrix.go-version }}-go-
+      - uses: actions/cache@v2
+        if: matrix.os == 'windows-latest'
+        with:
+          path: |
+            %LocalAppData%\go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-${{ matrix.go-version }}-go-
       - name: Build on ${{ matrix.os }}
         if: matrix.os == 'windows-latest'
         env:

@@ -39,4 +67,5 @@ jobs:
           curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-${nancy_version}-linux-amd64 && chmod +x nancy
           go list -deps -json ./... | jq -s 'unique_by(.Module.Path)|.[]|select(has("Module"))|.Module' | ./nancy sleuth
           make
           make test
+          make test-race
.github/workflows/go.yml (vendored): 27 changes

@@ -1,23 +1,41 @@
-name: Go
+name: Functional Tests

 on:
   pull_request:
     branches:
       - master

+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
 jobs:
   build:
-    name: MinIO Setup on ${{ matrix.go-version }} and ${{ matrix.os }}
+    name: Go ${{ matrix.go-version }} on ${{ matrix.os }} - healing
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.16.x, 1.17.x]
+        go-version: [1.17.x, 1.18.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
           go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - uses: actions/cache@v2
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-${{ matrix.go-version }}-go-
       - name: Build on ${{ matrix.os }}
         if: matrix.os == 'ubuntu-latest'
         env:

@@ -32,4 +50,3 @@ jobs:
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make verify
-          make verify-healing
.github/workflows/iam-integrations.yaml (vendored, new file): 118 additions

@@ -0,0 +1,118 @@
+name: IAM integration
+
+on:
+  pull_request:
+    branches:
+      - master
+
+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
+jobs:
+  iam-matrix-test:
+    name: "[Go=${{ matrix.go-version }}|ldap=${{ matrix.ldap }}|etcd=${{ matrix.etcd }}|openid=${{ matrix.openid }}]"
+    runs-on: ubuntu-latest
+
+    services:
+      openldap:
+        image: quay.io/minio/openldap
+        ports:
+          - "389:389"
+          - "636:636"
+        env:
+          LDAP_ORGANIZATION: "MinIO Inc"
+          LDAP_DOMAIN: "min.io"
+          LDAP_ADMIN_PASSWORD: "admin"
+      etcd:
+        image: "quay.io/coreos/etcd:v3.5.1"
+        env:
+          ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379"
+          ETCD_ADVERTISE_CLIENT_URLS: "http://0.0.0.0:2379"
+        ports:
+          - "2379:2379"
+        options: >-
+          --health-cmd "etcdctl endpoint health"
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5
+      openid:
+        image: quay.io/minio/dex
+        ports:
+          - "5556:5556"
+        env:
+          DEX_LDAP_SERVER: "openldap:389"
+      openid2:
+        image: quay.io/minio/dex
+        ports:
+          - "5557:5557"
+        env:
+          DEX_LDAP_SERVER: "openldap:389"
+          DEX_ISSUER: "http://127.0.0.1:5557/dex"
+          DEX_WEB_HTTP: "0.0.0.0:5557"
+
+    strategy:
+      # When ldap, etcd or openid vars are empty below, those external servers
+      # are turned off - i.e. if ldap="", then ldap server is not enabled for
+      # the tests.
+      matrix:
+        go-version: [1.17.x]
+        ldap: ["", "localhost:389"]
+        etcd: ["", "http://localhost:2379"]
+        openid: ["", "http://127.0.0.1:5556/dex"]
+        exclude:
+          # exclude combos where all are empty.
+          - ldap: ""
+            etcd: ""
+            openid: ""
+          # exclude combos where both ldap and openid IDPs are specified.
+          - ldap: "localhost:389"
+            openid: "http://127.0.0.1:5556/dex"
+
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-go@v3
+        with:
+          go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - uses: actions/cache@v2
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-${{ matrix.go-version }}-go-
+      - name: Test LDAP/OpenID/Etcd combo
+        env:
+          LDAP_TEST_SERVER: ${{ matrix.ldap }}
+          ETCD_SERVER: ${{ matrix.etcd }}
+          OPENID_TEST_SERVER: ${{ matrix.openid }}
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-iam
+      - name: Test with multiple OpenID providers
+        if: matrix.openid == 'http://127.0.0.1:5556/dex'
+        env:
+          LDAP_TEST_SERVER: ${{ matrix.ldap }}
+          ETCD_SERVER: ${{ matrix.etcd }}
+          OPENID_TEST_SERVER: ${{ matrix.openid }}
+          OPENID_TEST_SERVER_2: "http://127.0.0.1:5557/dex"
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-iam
+      - name: Test LDAP for automatic site replication
+        if: matrix.ldap == 'localhost:389'
+        run: |
+          make test-site-replication-ldap
+      - name: Test OIDC for automatic site replication
+        if: matrix.openid == 'http://127.0.0.1:5556/dex'
+        run: |
+          make test-site-replication-oidc
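For local debugging, one matrix cell of this job can be approximated outside CI. A sketch, assuming the LDAP and etcd containers above are already running on the named ports:

```sh
# Point the IAM tests at the local LDAP and etcd endpoints, then run them.
export LDAP_TEST_SERVER="localhost:389"
export ETCD_SERVER="http://localhost:2379"
make test-iam
```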
.github/workflows/lock.yml (vendored, new file): 24 additions

@@ -0,0 +1,24 @@
+name: 'Lock Threads'
+
+on:
+  schedule:
+    - cron: '0 0 * * *'
+  workflow_dispatch:
+
+permissions:
+  issues: write
+
+concurrency:
+  group: lock
+
+jobs:
+  action:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: dessant/lock-threads@v3
+        with:
+          github-token: ${{ github.token }}
+          issue-inactive-days: '365'
+          exclude-any-issue-labels: 'do-not-close'
+          issue-lock-reason: 'resolved'
+          log-output: true
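Because the new workflow declares `workflow_dispatch`, it can also be triggered on demand; a sketch using the GitHub CLI (assuming `gh` is authenticated against the repository):

```sh
# Manually kick off the thread-locking workflow instead of waiting for cron.
gh workflow run lock.yml
```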
.github/workflows/markdown-lint.yaml (vendored, new file): 30 additions

@@ -0,0 +1,30 @@
+name: Markdown Linter
+
+on:
+  pull_request:
+    branches:
+      - master
+
+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
+jobs:
+  lint:
+    name: Lint all docs
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v2
+
+      - name: Lint all docs
+        run: |
+          npm install -g markdownlint-cli
+          markdownlint --fix '**/*.md' \
+            --config /home/runner/work/minio/minio/.github/markdown-lint-cfg.yaml \
+            --disable MD013 MD040
.github/workflows/replication.yaml (vendored, new file): 50 additions

@@ -0,0 +1,50 @@
+name: Multi-site replication tests
+
+on:
+  pull_request:
+    branches:
+      - master
+
+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
+jobs:
+  replication-test:
+    name: Replication Tests with Go ${{ matrix.go-version }}
+    runs-on: ubuntu-latest
+
+    strategy:
+      matrix:
+        go-version: [1.17.x]
+
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-go@v3
+        with:
+          go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - uses: actions/cache@v2
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-${{ matrix.go-version }}-go-
+      - name: Test Replication
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-replication
+      - name: Test MinIO IDP for automatic site replication
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-site-replication-minio
.github/workflows/upgrade-ci-cd.yaml (vendored, new file): 34 additions

@@ -0,0 +1,34 @@
+name: Upgrade old version tests
+
+on:
+  pull_request:
+    branches:
+      - master
+
+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
+jobs:
+  build:
+    name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        go-version: [1.17.x]
+        os: [ubuntu-latest]
+
+    steps:
+      - uses: actions/checkout@v1
+      - uses: actions/setup-go@v3
+        with:
+          go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - name: Start upgrade tests
+        run: |
+          make test-upgrade
.gitignore (vendored): 13 changes

@@ -21,4 +21,15 @@ prime/
 stage/
 .sia_temp/
 config.json
 node_modules/
+mc.*
+s3-check-md5*
+xl-meta*
+healing-*
+inspect*
+200M*
+hash-set
+minio.RELEASE*
+mc
+nancy
+inspects/*
.golangci.yml

@@ -23,12 +23,30 @@ linters:
   - structcheck
   - unconvert
   - varcheck
+  - gocritic
+  - gofumpt
+  - tenv
+  - durationcheck
+
+linters-settings:
+  gofumpt:
+    lang-version: "1.17"
+
+    # Choose whether or not to use the extra rules that are disabled
+    # by default
+    extra-rules: false

 issues:
   exclude-use-default: false
   exclude:
     - should have a package comment
     - error strings should not be capitalized or end with punctuation or a newline
+    # todo fix these when we get enough time.
+    - "singleCaseSwitch: should rewrite switch statement to if statement"
+    - "unlambda: replace"
+    - "captLocal:"
+    - "ifElseChain:"
+    - "elseif:"

 service:
-  golangci-lint-version: 1.20.0 # use the fixed version to not introduce new linters unexpectedly
+  golangci-lint-version: 1.43.0 # use the fixed version to not introduce new linters unexpectedly
COMPLIANCE.md (new file): 7 additions

@@ -0,0 +1,7 @@
+# AGPLv3 Compliance
+
+We have designed MinIO as an Open Source software for the Open Source software community. This requires applications to consider whether their usage of MinIO is in compliance with the GNU AGPLv3 [license](https://github.com/minio/minio/blob/master/LICENSE).
+
+MinIO cannot make the determination as to whether your application's usage of MinIO is in compliance with the AGPLv3 license requirements. You should instead rely on your own legal counsel or licensing specialists to audit and ensure your application is in compliance with the licenses of MinIO and all other open-source projects with which your application integrates or interacts. We understand that AGPLv3 licensing is complex and nuanced. It is for that reason we strongly encourage using experts in licensing to make any such determinations around compliance instead of relying on apocryphal or anecdotal advice.
+
+[MinIO Commercial Licensing](https://min.io/pricing) is the best option for applications that trigger AGPLv3 obligations (e.g. open sourcing your application). Applications using MinIO - or any other OSS-licensed code - without validating their usage do so at their own risk.
CONTRIBUTING.md

@@ -7,15 +7,17 @@
 Start by forking the MinIO GitHub repository, make changes in a branch and then send a pull request. We encourage pull requests to discuss code changes. Here are the steps in details:

 ### Setup your MinIO GitHub Repository

 Fork [MinIO upstream](https://github.com/minio/minio/fork) source repository to your own personal repository. Copy the URL of your MinIO fork (you will need it for the `git clone` command below).

 ```sh
-$ git clone https://github.com/minio/minio
-$ go install -v
-$ ls /go/bin/minio
+git clone https://github.com/minio/minio
+go install -v
+ls /go/bin/minio
 ```

 ### Set up git remote as ``upstream``

 ```sh
 $ cd minio
 $ git remote add upstream https://github.com/minio/minio

@@ -25,13 +27,15 @@ $ git merge upstream/master
 ```

 ### Create your feature branch

 Before making code changes, make sure you create a separate branch for these changes

 ```
-$ git checkout -b my-new-feature
+git checkout -b my-new-feature
 ```

 ### Test MinIO server changes

 After your code changes, make sure

 - To add test cases for the new code. If you have questions about how to do it, please ask on our [Slack](https://slack.min.io) channel.

@@ -40,29 +44,38 @@ After your code changes, make sure
 - To run `make test` and `make build` completes.

 ### Commit changes

 After verification, commit your changes. This is a [great post](https://chris.beams.io/posts/git-commit/) on how to write useful commit messages

 ```
-$ git commit -am 'Add some feature'
+git commit -am 'Add some feature'
 ```

 ### Push to the branch

 Push your locally committed changes to the remote origin (your fork)

 ```
-$ git push origin my-new-feature
+git push origin my-new-feature
 ```

 ### Create a Pull Request

 Pull requests can be created via GitHub. Refer to [this document](https://help.github.com/articles/creating-a-pull-request/) for detailed steps on how to create a pull request. After a Pull Request gets peer reviewed and approved, it will be merged.

 ## FAQs

 ### How does ``MinIO`` manage dependencies?

 ``MinIO`` uses `go mod` to manage its dependencies.

 - Run `go get foo/bar` in the source folder to add the dependency to `go.mod` file.

 To remove a dependency

 - Edit your code and remove the import reference.
 - Run `go mod tidy` in the source folder to remove dependency from `go.mod` file.

 ### What are the coding guidelines for MinIO?

 ``MinIO`` is fully conformant with Golang style. Refer: [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project. If you observe offending code, please feel free to send a pull request or ping us on [Slack](https://slack.min.io).
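A concrete pass over the dependency workflow described in the FAQ above; the module path is illustrative, not a required dependency:

```sh
# Add (or upgrade) a dependency, then prune go.mod/go.sum of unused entries.
go get golang.org/x/sys@latest
go mod tidy
```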
@@ -1,5 +1,10 @@
 FROM minio/minio:latest

+ENV PATH=/opt/bin:$PATH
+
+COPY ./minio /opt/bin/minio
 COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh

 ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

+VOLUME ["/data"]
@@ -1,14 +1,9 @@
-FROM minio/minio:edge
+FROM minio/minio:latest

-LABEL maintainer="MinIO Inc <dev@min.io>"
+ENV PATH=/opt/bin:$PATH

-COPY minio /usr/bin/
-COPY dockerscripts/docker-entrypoint.sh /usr/bin/
-
-RUN chmod +x /usr/bin/minio && \
-    chmod +x /usr/bin/docker-entrypoint.sh
-
-EXPOSE 9000
+COPY ./minio /opt/bin/minio
+COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh

 ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
Dockerfile.hotfix (new file): 50 additions

@@ -0,0 +1,50 @@
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.5
+
+ARG RELEASE
+
+LABEL name="MinIO" \
+      vendor="MinIO Inc <dev@min.io>" \
+      maintainer="MinIO Inc <dev@min.io>" \
+      version="${RELEASE}" \
+      release="${RELEASE}" \
+      summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
+      description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."
+
+ENV MINIO_ACCESS_KEY_FILE=access_key \
+    MINIO_SECRET_KEY_FILE=secret_key \
+    MINIO_ROOT_USER_FILE=access_key \
+    MINIO_ROOT_PASSWORD_FILE=secret_key \
+    MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
+    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
+    MINIO_CONFIG_ENV_FILE=config.env \
+    PATH=/opt/bin:$PATH
+
+COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
+COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
+COPY CREDITS /licenses/CREDITS
+COPY LICENSE /licenses/LICENSE
+
+RUN \
+    microdnf clean all && \
+    microdnf update --nodocs && \
+    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
+    rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
+    microdnf install minisign --nodocs && \
+    mkdir -p /opt/bin && chmod -R 777 /opt/bin && \
+    curl -s -q https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.${RELEASE} -o /opt/bin/minio && \
+    curl -s -q https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.${RELEASE}.sha256sum -o /opt/bin/minio.sha256sum && \
+    curl -s -q https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.${RELEASE}.minisig -o /opt/bin/minio.minisig && \
+    microdnf clean all && \
+    chmod +x /opt/bin/minio && \
+    chmod +x /usr/bin/docker-entrypoint.sh && \
+    chmod +x /usr/bin/verify-minio.sh && \
+    /usr/bin/verify-minio.sh && \
+    microdnf clean all
+
+EXPOSE 9000
+
+ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
+
+VOLUME ["/data"]
+
+CMD ["minio"]
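The hotfix image downloads a `.minisig` signature alongside the binary and ships the public key via `MINIO_UPDATE_MINISIGN_PUBKEY`. A sketch of the kind of check `verify-minio.sh` can perform (the script itself is not shown in this diff):

```sh
# Verify the downloaded hotfix binary against its minisign signature.
minisign -Vm /opt/bin/minio -x /opt/bin/minio.minisig \
  -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
```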
@@ -1,4 +1,4 @@
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.4
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.5

 ARG TARGETARCH

@@ -19,7 +19,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
     MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
     MINIO_CONFIG_ENV_FILE=config.env \
-    PATH=$PATH:/opt/bin
+    PATH=/opt/bin:$PATH

 COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
 COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh

@@ -27,8 +27,9 @@ COPY CREDITS /licenses/CREDITS
 COPY LICENSE /licenses/LICENSE

 RUN \
     microdnf clean all && \
     microdnf update --nodocs && \
-    microdnf install curl ca-certificates shadow-utils util-linux iproute iputils --nodocs && \
+    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
     rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
     microdnf install minisign --nodocs && \
     mkdir -p /opt/bin && chmod -R 777 /opt/bin && \

@@ -39,7 +40,8 @@ RUN \
     chmod +x /opt/bin/minio && \
     chmod +x /usr/bin/docker-entrypoint.sh && \
     chmod +x /usr/bin/verify-minio.sh && \
-    /usr/bin/verify-minio.sh
+    /usr/bin/verify-minio.sh && \
+    microdnf clean all

 EXPOSE 9000
@@ -1,4 +1,4 @@
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.4
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.5

 ARG TARGETARCH

@@ -19,7 +19,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
     MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
     MINIO_CONFIG_ENV_FILE=config.env \
-    PATH=$PATH:/opt/bin
+    PATH=/opt/bin:$PATH

 COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
 COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh

@@ -27,8 +27,9 @@ COPY CREDITS /licenses/CREDITS
 COPY LICENSE /licenses/LICENSE

 RUN \
     microdnf clean all && \
     microdnf update --nodocs && \
-    microdnf install curl ca-certificates shadow-utils util-linux iproute iputils --nodocs && \
+    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
     rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
     microdnf install minisign --nodocs && \
     mkdir -p /opt/bin && chmod -R 777 /opt/bin && \

@@ -39,7 +40,8 @@ RUN \
     chmod +x /opt/bin/minio && \
     chmod +x /usr/bin/docker-entrypoint.sh && \
     chmod +x /usr/bin/verify-minio.sh && \
-    /usr/bin/verify-minio.sh
+    /usr/bin/verify-minio.sh && \
+    microdnf clean all

 EXPOSE 9000
Makefile: 74 changes

@@ -19,9 +19,9 @@ help: ## print this help

 getdeps: ## fetch necessary dependencies
 	@mkdir -p ${GOPATH}/bin
-	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.40.1
-	@which msgp 1>/dev/null || (echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.3)
-	@which stringer 1>/dev/null || (echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer)
+	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.45.2
+	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.7-0.20211026165309-e818a1881b0e
+	@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest

 crosscompile: ## cross compile minio
 	@(env bash $(PWD)/buildscripts/cross-compile.sh)

@@ -34,45 +34,89 @@ check-gen: ## check for updated autogenerated files

 lint: ## runs golangci-lint suite of linters
 	@echo "Running $@ check"
-	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
-	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml
+	@${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml

 check: test
 test: verifiers build ## builds minio, runs linters, tests
 	@echo "Running unit tests"
-	@GOGC=25 GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
+	@CGO_ENABLED=0 go test -tags kqueue ./...

-test-race: verifiers build
+test-upgrade: build
+	@echo "Running minio upgrade tests"
+	@(env bash $(PWD)/buildscripts/minio-upgrade.sh)
+
+test-race: verifiers build ## builds minio, runs linters, tests (race)
 	@echo "Running unit tests under -race"
 	@(env bash $(PWD)/buildscripts/race.sh)

+test-iam: build ## verify IAM (external IDP, etcd backends)
+	@echo "Running tests for IAM (external IDP, etcd backends)"
+	@CGO_ENABLED=0 go test -tags kqueue -v -run TestIAM* ./cmd
+	@echo "Running tests for IAM (external IDP, etcd backends) with -race"
+	@CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd
+
+test-replication: install ## verify multi site replication
+	@echo "Running tests for replicating three sites"
+	@(env bash $(PWD)/docs/bucket/replication/setup_3site_replication.sh)
+
+test-site-replication-ldap: install ## verify automatic site replication
+	@echo "Running tests for automatic site replication of IAM (with LDAP)"
+	@(env bash $(PWD)/docs/site-replication/run-multi-site-ldap.sh)
+
+test-site-replication-oidc: install ## verify automatic site replication
+	@echo "Running tests for automatic site replication of IAM (with OIDC)"
+	@(env bash $(PWD)/docs/site-replication/run-multi-site-oidc.sh)
+
+test-site-replication-minio: install ## verify automatic site replication
+	@echo "Running tests for automatic site replication of IAM (with MinIO IDP)"
+	@(env bash $(PWD)/docs/site-replication/run-multi-site-minio-idp.sh)
+
 verify: ## verify minio various setups
 	@echo "Verifying build with race"
-	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-build.sh)

 verify-healing: ## verify healing and replacing disks with minio binary
 	@echo "Verify healing build with race"
-	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-healing.sh)
+	@(env bash $(PWD)/buildscripts/unaligned-healing.sh)

+verify-healing-inconsistent-versions: ## verify resolving inconsistent versions
+	@echo "Verify resolving inconsistent versions build with race"
+	@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@(env bash $(PWD)/buildscripts/resolve-right-versions.sh)
+
 build: checks ## builds minio to $(PWD)
 	@echo "Building minio binary to './minio'"
-	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null

 hotfix-vars:
 	$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
 	    sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#')))
-	$(eval TAG := "minio/minio:$(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD)")
-hotfix: hotfix-vars install ## builds minio binary with hotfix tags
+	$(eval VERSION := $(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD))
+	$(eval TAG := "minio/minio:$(VERSION)")

-docker-hotfix: hotfix checks ## builds minio docker container with hotfix tags
+hotfix: hotfix-vars install ## builds minio binary with hotfix tags
+	@mv -f ./minio ./minio.$(VERSION)
+	@minisign -qQSm ./minio.$(VERSION) -s "${CRED_DIR}/minisign.key" < "${CRED_DIR}/minisign-passphrase"
+	@sha256sum < ./minio.$(VERSION) | sed 's, -,minio.$(VERSION),g' > minio.$(VERSION).sha256sum
+
+hotfix-push: hotfix
+	@scp -q -r minio.$(VERSION)* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive/
+	@scp -q -r minio.$(VERSION)* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive/
+	@echo "Published new hotfix binaries at https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.$(VERSION)"
+
+docker-hotfix-push: docker-hotfix
+	@docker push -q $(TAG) && echo "Published new container $(TAG)"
+
+docker-hotfix: hotfix-push checks ## builds minio docker container with hotfix tags
 	@echo "Building minio docker image '$(TAG)'"
-	@docker build -t $(TAG) . -f Dockerfile.dev
+	@docker build -q --no-cache -t $(TAG) --build-arg RELEASE=$(VERSION) . -f Dockerfile.hotfix

 docker: build checks ## builds minio docker container
 	@echo "Building minio docker image '$(TAG)'"
-	@docker build -t $(TAG) . -f Dockerfile.dev
+	@docker build -q --no-cache -t $(TAG) . -f Dockerfile

 install: build ## builds minio and installs it to $GOPATH/bin.
 	@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
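The `hotfix-vars` target above rewrites a `git describe` tag into the RFC 3339 timestamp that `buildscripts/gen-ldflags.go` expects. A standalone sketch of that sed, using a hypothetical tag value:

```sh
# RELEASE.<date>T<hh>-<mm>-<ss>Z  ->  <date>T<hh>:<mm>:<ss>Z
echo "RELEASE.2022-04-26T01-20-24Z" | \
  sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#'
# prints 2022-04-26T01:20:24Z
```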
111
README.md
111
README.md
@@ -1,13 +1,14 @@
|
||||
# MinIO Quickstart Guide
|
||||
|
||||
[](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/) [](https://github.com/minio/minio/blob/master/LICENSE)
|
||||
|
||||
[](https://min.io)
|
||||
|
||||
MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
|
||||
|
||||
This README provides quickstart instructions on running MinIO on baremetal hardware, including container-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).
|
||||
This README provides quickstart instructions on running MinIO on bare metal hardware, including container-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).
|
||||
|
||||
# Container Installation
|
||||
## Container Installation
|
||||
|
||||
Use the following commands to run a standalone MinIO server as a container.
|
||||
|
||||
@@ -16,7 +17,7 @@ require distributed deploying MinIO with Erasure Coding. For extended developmen
|
||||
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
|
||||
for more complete documentation.
|
||||
|
||||
## Stable
|
||||
### Stable
|
||||
|
||||
Run the following command to run the latest stable image of MinIO as a container using an ephemeral data volume:
|
||||
|
||||
@@ -26,22 +27,22 @@ podman run -p 9000:9000 -p 9001:9001 \
|
||||
```
|
||||
|
||||
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded
|
||||
object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
|
||||
object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the
|
||||
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
|
||||
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
|
||||
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
|
||||
see https://docs.min.io/docs/ and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
|
||||
see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
|
||||
|
||||
> NOTE: To deploy MinIO on with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the container.
|
||||
|
||||
# macOS
|
||||
## macOS
|
||||
|
||||
Use the following commands to run a standalone MinIO server on macOS.
|
||||
|
||||
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.
|
||||
|
||||
## Homebrew (recommended)
|
||||
### Homebrew (recommended)
|
||||
|
||||
Run the following command to install the latest stable MinIO package using [Homebrew](https://brew.sh/). Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
|
||||
|
||||
@@ -57,11 +58,11 @@ brew uninstall minio
|
||||
brew install minio/stable/minio
|
||||
```
|
||||
|
||||
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
|
||||
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
|
||||
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see https://docs.min.io/docs/ and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
|
||||
|
||||
## Binary Download
|
||||
### Binary Download
|
||||
|
||||
Use the following command to download and run a standalone MinIO server on macOS. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
|
||||
|
||||
@@ -71,11 +72,11 @@ chmod +x minio
|
||||
./minio server /data
|
||||
```
|
||||
|
||||
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
## GNU/Linux

Use the following command to run a standalone MinIO server on Linux hosts running 64-bit Intel/AMD architectures. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
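For example, for the 64-bit Intel/AMD build (substitute the URL for your architecture from the table below):

```sh
wget https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
./minio server /data
```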
The following table lists supported architectures. Replace the `wget` URL with the URL for your architecture:

| Architecture | URL |
| -------- | ------ |
| 64-bit Intel/AMD | <https://dl.min.io/server/minio/release/linux-amd64/minio> |
| 64-bit ARM | <https://dl.min.io/server/minio/release/linux-arm64/minio> |
| 64-bit PowerPC LE (ppc64le) | <https://dl.min.io/server/minio/release/linux-ppc64le/minio> |
| IBM Z-Series (S390X) | <https://dl.min.io/server/minio/release/linux-s390x/minio> |
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See the [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.
## Microsoft Windows

To run MinIO on 64-bit Windows hosts, download the MinIO executable from the following URL:
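Following the per-architecture URL scheme above, the 64-bit Windows build is at:

```sh
https://dl.min.io/server/minio/release/windows-amd64/minio.exe
```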
Use the following command to run a standalone MinIO server on the Windows host. Replace ``D:\`` with the path to the drive or directory in which you want MinIO to store data.

```sh
minio.exe server D:\
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See the [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.
## Install from Source

Use the following commands to compile and run a standalone MinIO server from source. Source installation is intended only for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). The minimum version required is [go1.17](https://golang.org/dl/#stable).
```sh
GO111MODULE=on go install github.com/minio/minio@latest
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See the [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.
MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.

## Deployment Recommendations

### Allow port access for Firewalls
By default, MinIO uses port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to the port.
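For example, on hosts using `ufw` (a sketch; adjust the port if you changed the default):

```sh
ufw allow 9000
```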
On hosts using `iptables`, allow the port and restart the service:

```sh
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
service iptables restart
```
## Pre-existing data

When deployed on a single drive, MinIO server lets clients access any pre-existing data in the data directory. For example, if MinIO is started with the command `minio server /mnt/data`, any pre-existing data in the `/mnt/data` directory would be accessible to the clients.

The above statement is also valid for all gateway backends.
## Test MinIO Connectivity

### Test using MinIO Console

MinIO Server comes with an embedded web based object browser. Point your web browser to <http://127.0.0.1:9000> to ensure your server has started successfully.
> NOTE: MinIO runs the Console on a random port by default. If you wish to choose a specific port, use `--console-address` to pick a specific interface and port.
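For example, to pin the Console to port 9001 on all interfaces:

```sh
./minio server /data --console-address ":9001"
```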
### Things to consider

MinIO redirects browser access requests made to the configured server port (i.e. `127.0.0.1:9000`) to the configured Console port. MinIO uses the hostname or IP address specified in the request when building the redirect URL. The URL and port *must* be accessible by the client for the redirection to work.
For deployments behind a load balancer, proxy, or ingress rule where the MinIO host IP address or port is not public, use the `MINIO_BROWSER_REDIRECT_URL` environment variable to specify the external hostname for the redirect. The LB/Proxy must have rules for directing traffic to the Console port specifically.
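A sketch, with an illustrative hostname:

```sh
export MINIO_BROWSER_REDIRECT_URL="https://console.example.net"
```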
Similarly, if your TLS certificates do not have the IP SAN for the MinIO server host, the MinIO Console may fail to validate the connection to the server. Use the `MINIO_SERVER_URL` environment variable and specify the proxy-accessible hostname of the MinIO server.

For example: `export MINIO_SERVER_URL="https://minio.example.net"`
| Dashboard | Creating a bucket |
| ------------- | ------------- |
|  |  |
## Test using MinIO Client `mc`

`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://docs.min.io/docs/minio-client-quickstart-guide) for further instructions.
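A quick smoke test against the deployment above (the alias name `local` is illustrative):

```sh
mc alias set local http://127.0.0.1:9000 minioadmin minioadmin
mc mb local/testbucket
mc ls local
```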
## Upgrading MinIO

Upgrades require zero downtime in MinIO; all upgrades are non-disruptive and all transactions on MinIO are atomic. Upgrading all servers simultaneously is therefore the recommended way to upgrade MinIO.

> NOTE: Updating directly from <https://dl.min.io> requires internet access; optionally, you can host mirrors at <https://my-artifactory.example.com/minio/>.

- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://docs.min.io/minio/baremetal/reference/minio-mc-admin/mc-admin-update.html):

```sh
mc admin update <minio alias, e.g., myminio>
```

> NOTE: Some releases might not allow rolling upgrades; this is always called out in the release notes, so it is generally advised to read the release notes before upgrading. In such a situation, `mc admin update` is the recommended mechanism to upgrade all servers at once.

- For deployments without external internet access (e.g. air-gapped environments), download the binary from <https://dl.min.io>, replace the existing MinIO binary (for example `/opt/bin/minio`), apply executable permissions with `chmod +x /opt/bin/minio`, and run `mc admin service restart alias/`.

### Important things to remember during MinIO upgrades

- For RPM/DEB installations, upgrade packages on all servers in **parallel**. Once upgraded, run `systemctl restart minio` across all nodes in parallel. RPM/DEB based installations are usually automated using [`ansible`](https://github.com/minio/ansible-minio).
- `mc admin update` only works if the user running MinIO has write access to the parent directory of the binary; for example, if the current binary is at `/usr/local/bin/minio`, you need write access to `/usr/local/bin`.
- `mc admin update` updates and restarts all servers simultaneously; applications retry and continue their respective operations upon upgrade.
- In the case of federated setups, run `mc admin update` against each cluster individually. Avoid updating `mc` to any new release until all clusters have been successfully updated.
- If using `kes` as KMS with MinIO, just replace the binary and restart `kes`; more information about `kes` can be found [here](https://github.com/minio/kes/wiki).
- If using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: <https://www.vaultproject.io/docs/upgrading/index.html>
- If using etcd with MinIO for federation, ensure you have followed the etcd upgrade procedure outlined here: <https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md>

### Upgrade Checklist

- Test all upgrades in a lower environment (DEV, QA, UAT) before applying to production. Performing blind upgrades in production environments carries significant risk.
- Read the release notes for the targeted MinIO release *before* performing any installation; there is no forced requirement to upgrade to the latest release every week. If a release has a bug fix you need, upgrade; otherwise avoid actively upgrading a running production system.
- Make sure the MinIO process has write access to `/opt/bin` if you plan to use `mc admin update`. This is needed for MinIO to download the latest binary from <https://dl.min.io> and save it locally for upgrades.
- `mc admin update` is not supported in kubernetes/container environments; container environments provide their own mechanisms for container updates.
- **We do not recommend upgrading one MinIO server at a time; the product is designed to support parallel upgrades. Please follow our recommended guidelines.**
## Explore Further

- [MinIO Erasure Code QuickStart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
- [Use `mc` with MinIO Server](https://docs.min.io/docs/minio-client-quickstart-guide)
- [Use `aws-cli` with MinIO Server](https://docs.min.io/docs/aws-cli-with-minio)
- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/docs/golang-client-quickstart-guide)
- [The MinIO documentation website](https://docs.min.io)
## Contribute to MinIO Project

Please follow the MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md).
## License

- MinIO source is licensed under the GNU AGPLv3 license that can be found in the [LICENSE](https://github.com/minio/minio/blob/master/LICENSE) file.
- MinIO [Documentation](https://github.com/minio/minio/tree/master/docs) © 2021 by MinIO, Inc is licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/).
- [License Compliance](https://github.com/minio/minio/blob/master/COMPLIANCE.md)
If you have not received a reply to your email within 48 hours, or you have not heard from the security team for the past five days, please contact the security team directly:

- Primary security coordinator: aead@min.io
- Secondary coordinator: harsha@min.io
- If you receive no response: dev@min.io
### Disclosure Process

MinIO uses the following disclosure process:

1. Once the security report is received, one member of the security team tries to verify and reproduce the issue and determines the impact it has.
2. A member of the security team will respond and either confirm or reject the security report. If the report is rejected the response explains why.
3. Code is audited to find any potential similar problems.
4. Fixes are prepared for the latest release.
5. On the date that the fixes are applied a security advisory will be published on <https://blog.min.io>.

Please inform us in your report email whether MinIO should mention your contribution w.r.t. fixing the security issue. By default MinIO will **not** publish this information to protect your privacy.
# Vulnerability Management Policy

This document formally describes the process of addressing and managing a reported vulnerability that has been found in the MinIO server code base, any directly connected ecosystem component or a direct / indirect dependency of the code base.

## Scope

The vulnerability management policy described in this document covers the process of investigating, assessing and resolving a vulnerability report opened by a MinIO employee or an external third party. Therefore, it lists pre-conditions and actions that should be performed to resolve and fix a reported vulnerability.

## Vulnerability Management Process
The vulnerability management process requires that the vulnerability report contains the following information:

- The project / component that contains the reported vulnerability.
- A description of the vulnerability. In particular, the type of the reported vulnerability and how it might be exploited. Alternatively, a well-established vulnerability identifier, e.g. a CVE number, can be used instead.
Based on the description mentioned above, a MinIO engineer or security team member investigates:

- Whether the reported vulnerability exists.
- The conditions that are required such that the vulnerability can be exploited.
- The steps required to fix the vulnerability.

In general, if the vulnerability exists in one of the MinIO code bases itself - not in a code dependency - then MinIO will, if possible, fix the vulnerability or implement reasonable countermeasures such that the vulnerability cannot be exploited anymore.
**.gitignore** (deleted, 18 lines)

```
**/*.swp
cover.out
*~
minio
!*/
site/
**/*.test
**/*.sublime-workspace
/.idea/
/Minio.iml
**/access.log
build
vendor/**/*.js
vendor/**/*.json
.DS_Store
*.syso
coverage.txt
node_modules
```
Binary files not shown in this view (new `xl.meta` fixtures):

- buildscripts/cicd-corpus/disk2/bucket/testobj/xl.meta
- buildscripts/cicd-corpus/disk3/bucket/testobj/xl.meta
- buildscripts/cicd-corpus/disk4/bucket/testobj/xl.meta
- buildscripts/cicd-corpus/disk5/bucket/testobj/xl.meta

**buildscripts/minio-upgrade.sh** (new file, 92 lines)
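The upgrade test drives a four-node docker-compose cluster: it first boots the old RELEASE.2019-12-19T22-52-26Z image, seeds a bucket with test objects (including the `minio` binary itself and `/etc/hosts`), then restarts the same volumes on the freshly built `dev` image and verifies via `mc cat` and `curl` that the object checksums are unchanged, including after a forced heal: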
```sh
#!/bin/bash

trap 'cleanup $LINENO' ERR

# shellcheck disable=SC2120
cleanup() {
    MINIO_VERSION=dev docker-compose \
        -f "buildscripts/upgrade-tests/compose.yml" \
        rm -s -f
    docker volume prune -f
}

verify_checksum_after_heal() {
    local sum1
    sum1=$(curl -s "$2" | sha256sum);
    mc admin heal --json -r "$1" >/dev/null; # test after healing
    local sum1_heal
    sum1_heal=$(curl -s "$2" | sha256sum);

    if [ "${sum1_heal}" != "${sum1}" ]; then
        echo "mismatch expected ${sum1_heal}, got ${sum1}"
        exit 1;
    fi
}

verify_checksum_mc() {
    local expected
    expected=$(mc cat "$1" | sha256sum)
    local got
    got=$(mc cat "$2" | sha256sum)

    if [ "${expected}" != "${got}" ]; then
        echo "mismatch - expected ${expected}, got ${got}"
        exit 1;
    fi
    echo "matches - ${expected}, got ${got}"
}

add_alias() {
    for i in $(seq 1 4); do
        echo "... attempting to add alias $i"
        until (mc alias set minio http://127.0.0.1:9000 minioadmin minioadmin); do
            echo "...waiting... for 5secs" && sleep 5
        done
    done

    echo "Sleeping for nginx"
    sleep 20
}

__init__() {
    sudo apt install curl -y
    export GOPATH=/tmp/gopath
    export PATH=${PATH}:${GOPATH}/bin

    go install github.com/minio/mc@latest

    TAG=minio/minio:dev make docker

    MINIO_VERSION=RELEASE.2019-12-19T22-52-26Z docker-compose \
        -f "buildscripts/upgrade-tests/compose.yml" \
        up -d --build

    add_alias

    mc mb minio/minio-test/
    mc cp ./minio minio/minio-test/to-read/
    mc cp /etc/hosts minio/minio-test/to-read/hosts
    mc policy set download minio/minio-test

    verify_checksum_mc ./minio minio/minio-test/to-read/minio

    curl -s http://127.0.0.1:9000/minio-test/to-read/hosts | sha256sum

    MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
}

main() {
    MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build

    add_alias

    verify_checksum_after_heal minio/minio-test http://127.0.0.1:9000/minio-test/to-read/hosts

    verify_checksum_mc ./minio minio/minio-test/to-read/minio

    verify_checksum_mc /etc/hosts minio/minio-test/to-read/hosts

    cleanup
}

( __init__ "$@" && main "$@" )
```
```diff
@@ -2,6 +2,7 @@
 
 set -e
 
-for d in $(go list ./... | grep -v browser); do
-    CGO_ENABLED=1 go test -v -tags kqueue -race --timeout 100m "$d"
+## TODO remove `dsync` from race detector once this is merged and released https://go-review.googlesource.com/c/go/+/333529/
+for d in $(go list ./... | grep -v dsync); do
+    CGO_ENABLED=1 go test -v -race --timeout 100m "$d"
 done
```
**buildscripts/resolve-right-versions.sh** (new executable file, 72 lines)
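This script builds `mc` from source, copies the `cicd-corpus` fixtures above into a five-drive layout, boots MinIO on a random port against it, and checks that `mc stat minio/bucket/testobj` can resolve the test object: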
```sh
#!/bin/bash -e

set -E
set -o pipefail
set -x

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )

if [ ! -x "$PWD/minio" ]; then
    echo "minio executable binary not found in current directory"
    exit 1
fi

function start_minio_5drive() {
    start_port=$1

    export MINIO_ROOT_USER=minio
    export MINIO_ROOT_PASSWORD=minio123
    export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
    unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
    export MINIO_CI_CD=1

    MC_BUILD_DIR="mc-$RANDOM"
    if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
        echo "failed to download https://github.com/minio/mc"
        purge "${MC_BUILD_DIR}"
        exit 1
    fi

    (cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")

    # remove mc source.
    purge "${MC_BUILD_DIR}"

    "${WORK_DIR}/mc" cp --quiet -r "buildscripts/cicd-corpus/" "${WORK_DIR}/cicd-corpus/"

    "${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/cicd-corpus/disk{1...5}" > "${WORK_DIR}/server1.log" 2>&1 &
    pid=$!
    disown $pid
    sleep 30

    if ! ps -p ${pid} 1>&2 >/dev/null; then
        echo "server1 log:"
        cat "${WORK_DIR}/server1.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    "${WORK_DIR}/mc" stat minio/bucket/testobj

    pkill minio
    sleep 3
}

function main() {
    start_port=$(shuf -i 10000-65000 -n 1)

    start_minio_5drive ${start_port}
}

function purge()
{
    rm -rf "$1"
}

( main "$@" )
rv=$?
purge "$WORK_DIR"
exit "$rv"
```
**buildscripts/unaligned-healing.sh** (new executable file, 172 lines)
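This healing test downloads the older RELEASE.2021-11-24T23-19-33Z binary, uploads a 5241856-byte ("unaligned", i.e. not shard-aligned) object without multipart, removes data shards one at a time, and uses the `s3-check-md5` debugging tool to assert that the old release reports the object CORRUPTED after the shard loss, while the new binary, together with `mc admin heal`, reports it INTACT: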
```sh
#!/bin/bash -e
#

set -E
set -o pipefail
set -x

if [ ! -x "$PWD/minio" ]; then
    echo "minio executable binary not found in current directory"
    exit 1
fi

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO_OLD=( "$PWD/minio.RELEASE.2021-11-24T23-19-33Z" --config-dir "$MINIO_CONFIG_DIR" server )
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )

function download_old_release() {
    if [ ! -f minio.RELEASE.2021-11-24T23-19-33Z ]; then
        curl --silent -O https://dl.minio.io/server/minio/release/linux-amd64/archive/minio.RELEASE.2021-11-24T23-19-33Z
        chmod a+x minio.RELEASE.2021-11-24T23-19-33Z
    fi
}

function start_minio_16drive() {
    start_port=$1

    export MINIO_ROOT_USER=minio
    export MINIO_ROOT_PASSWORD=minio123
    export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
    unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
    export _MINIO_SHARD_DISKTIME_DELTA="5s" # do not change this as its needed for tests
    export MINIO_CI_CD=1

    MC_BUILD_DIR="mc-$RANDOM"
    if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
        echo "failed to download https://github.com/minio/mc"
        purge "${MC_BUILD_DIR}"
        exit 1
    fi

    (cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")

    # remove mc source.
    purge "${MC_BUILD_DIR}"

    "${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" > "${WORK_DIR}/server1.log" 2>&1 &
    pid=$!
    disown $pid
    sleep 30

    if ! ps -p ${pid} 1>&2 >/dev/null; then
        echo "server1 log:"
        cat "${WORK_DIR}/server1.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    shred --iterations=1 --size=5241856 - 1>"${WORK_DIR}/unaligned" 2>/dev/null
    "${WORK_DIR}/mc" mb minio/healing-shard-bucket --quiet
    "${WORK_DIR}/mc" cp \
        "${WORK_DIR}/unaligned" \
        minio/healing-shard-bucket/unaligned \
        --disable-multipart --quiet

    ## "unaligned" object name gets consistently distributed
    ## to disks in following distribution order
    ##
    ## NOTE: if you change the name make sure to change the
    ## distribution order present here
    ##
    ## [15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]

    ## make sure to remove the "last" data shard
    rm -rf "${WORK_DIR}/xl14/healing-shard-bucket/unaligned"
    sleep 10
    ## Heal the shard
    "${WORK_DIR}/mc" admin heal --quiet --recursive minio/healing-shard-bucket
    ## then remove any other data shard let's pick first disk
    ## - 1st data shard.
    rm -rf "${WORK_DIR}/xl3/healing-shard-bucket/unaligned"
    sleep 10

    go build ./docs/debugging/s3-check-md5/
    if ! ./s3-check-md5 \
        -debug \
        -access-key minio \
        -secret-key minio123 \
        -endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep CORRUPTED; then
        echo "server1 log:"
        cat "${WORK_DIR}/server1.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    pkill minio
    sleep 3

    "${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" > "${WORK_DIR}/server1.log" 2>&1 &
    pid=$!
    disown $pid
    sleep 30

    if ! ps -p ${pid} 1>&2 >/dev/null; then
        echo "server1 log:"
        cat "${WORK_DIR}/server1.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    if ! ./s3-check-md5 \
        -debug \
        -access-key minio \
        -secret-key minio123 \
        -endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
        echo "server1 log:"
        cat "${WORK_DIR}/server1.log"
        echo "FAILED"
        mkdir -p inspects
        (cd inspects; "${WORK_DIR}/mc" admin inspect minio/healing-shard-bucket/unaligned/**)

        "${WORK_DIR}/mc" mb play/inspects
        "${WORK_DIR}/mc" mirror inspects play/inspects

        purge "$WORK_DIR"
        exit 1
    fi

    "${WORK_DIR}/mc" admin heal --quiet --recursive minio/healing-shard-bucket

    if ! ./s3-check-md5 \
        -debug \
        -access-key minio \
        -secret-key minio123 \
        -endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
        echo "server1 log:"
        cat "${WORK_DIR}/server1.log"
        echo "FAILED"
        mkdir -p inspects
        (cd inspects; "${WORK_DIR}/mc" admin inspect minio/healing-shard-bucket/unaligned/**)

        "${WORK_DIR}/mc" mb play/inspects
        "${WORK_DIR}/mc" mirror inspects play/inspects

        purge "$WORK_DIR"
        exit 1
    fi

    pkill minio
    sleep 3
}

function main() {
    download_old_release

    start_port=$(shuf -i 10000-65000 -n 1)

    start_minio_16drive ${start_port}
}

function purge()
{
    rm -rf "$1"
}

( main "$@" )
rv=$?
purge "$WORK_DIR"
exit "$rv"
```
**buildscripts/upgrade-tests/compose.yml** (new file, 81 lines)
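The compose file that the upgrade test drives: four `minio` containers (version selected by `${MINIO_VERSION}`) in a distributed `server http://minio{1...4}/data{1...3}` topology behind an nginx load balancer on port 9000: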
```yml
version: '3.7'

# Settings and configurations that are common for all containers
x-minio-common: &minio-common
  image: minio/minio:${MINIO_VERSION}
  command: server http://minio{1...4}/data{1...3}
  env_file:
    - ./minio.env
  expose:
    - "9000"
    - "9001"
  healthcheck:
    test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
    interval: 30s
    timeout: 20s
    retries: 3

# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
  minio1:
    <<: *minio-common
    hostname: minio1
    volumes:
      - data1-1:/data1
      - data1-2:/data2
      - data1-3:/data3

  minio2:
    <<: *minio-common
    hostname: minio2
    volumes:
      - data2-1:/data1
      - data2-2:/data2
      - data2-3:/data3

  minio3:
    <<: *minio-common
    hostname: minio3
    volumes:
      - data3-1:/data1
      - data3-2:/data2
      - data3-3:/data3

  minio4:
    <<: *minio-common
    hostname: minio4
    volumes:
      - data4-1:/data1
      - data4-2:/data2
      - data4-3:/data3

  nginx:
    image: nginx:1.19.2-alpine
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf:ro
    ports:
      - "9000:9000"
      - "9001:9001"
    depends_on:
      - minio1
      - minio2
      - minio3
      - minio4

## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
  data1-1:
  data1-2:
  data1-3:
  data2-1:
  data2-2:
  data2-3:
  data3-1:
  data3-2:
  data3-3:
  data4-1:
  data4-2:
  data4-3:
```
**buildscripts/upgrade-tests/minio.env** (new file, 3 lines)
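Shared credentials for all four containers, using the `MINIO_ACCESS_KEY`/`MINIO_SECRET_KEY` names (presumably because the old 2019 release in the upgrade test predates the `MINIO_ROOT_USER` spelling):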
```
MINIO_ACCESS_KEY=minioadmin
MINIO_SECRET_KEY=minioadmin
MINIO_BROWSER=off
```
**buildscripts/upgrade-tests/nginx.conf** (new file, 68 lines)
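The nginx front end round-robins across the four MinIO containers and disables request buffering and body-size limits so large object uploads pass straight through: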
```
user  nginx;
worker_processes  auto;

error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;


events {
    worker_connections  1024;
}


http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    keepalive_timeout  65;

    #gzip  on;

    # include /etc/nginx/conf.d/*.conf;

    upstream minio {
        server minio1:9000;
        server minio2:9000;
        server minio3:9000;
        server minio4:9000;
    }

    # main minio
    server {
        listen       9000;
        listen  [::]:9000;
        server_name  localhost;

        # To allow special characters in headers
        ignore_invalid_headers off;
        # Allow any size file to be uploaded.
        # Set to a value such as 1000m; to restrict file size to a specific value
        client_max_body_size 0;
        # To disable buffering
        proxy_buffering off;

        location / {
            proxy_set_header Host $http_host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;

            proxy_connect_timeout 300;
            # Default is HTTP/1, keepalive is only enabled in HTTP/1.1
            proxy_http_version 1.1;
            proxy_set_header Connection "";
            chunked_transfer_encoding off;

            proxy_pass http://minio;
        }
    }
}
```
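The remaining files in this comparison are modifications to existing files, shown below as hunks; the `-`/`+` markers are reconstructed from the hunk line counts and context. First, the CI verification scripts gain `MINIO_CI_CD=1`, and the IPv6 pool test writes to its own `pool-disk-sets-ipv6{1...8}` directories: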
```diff
@@ -22,6 +22,8 @@ export GO111MODULE=on
 export GOGC=25
 export ENABLE_ADMIN=1
 
+export MINIO_CI_CD=1
+
 MINIO_CONFIG_DIR="$WORK_DIR/.minio"
 MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" )

@@ -66,7 +68,7 @@ function start_minio_pool_erasure_sets_ipv6()
 {
     export MINIO_ROOT_USER=$ACCESS_KEY
     export MINIO_ROOT_PASSWORD=$SECRET_KEY
-    export MINIO_ENDPOINTS="http://[::1]:9000${WORK_DIR}/pool-disk-sets{1...4} http://[::1]:9001${WORK_DIR}/pool-disk-sets{5...8}"
+    export MINIO_ENDPOINTS="http://[::1]:9000${WORK_DIR}/pool-disk-sets-ipv6{1...4} http://[::1]:9001${WORK_DIR}/pool-disk-sets-ipv6{5...8}"
     "${MINIO[@]}" server --address="[::1]:9000" > "$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
     "${MINIO[@]}" server --address="[::1]:9001" > "$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &
```
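The distributed healing test now tracks each server PID explicitly instead of counting `pgrep` hits, and reuses a single randomly chosen port base across all three `perform_test` invocations: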
```diff
@@ -17,55 +17,75 @@ function start_minio_3_node() {
     export MINIO_ROOT_USER=minio
     export MINIO_ROOT_PASSWORD=minio123
     export MINIO_ERASURE_SET_DRIVE_COUNT=6
+    export MINIO_CI_CD=1
 
-    start_port=$(shuf -i 10000-65000 -n 1)
+    start_port=$2
     args=""
     for i in $(seq 1 3); do
         args="$args http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/"
     done
 
     "${MINIO[@]}" --address ":$[$start_port+1]" $args > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
-    disown $!
+    pid1=$!
+    disown ${pid1}
 
     "${MINIO[@]}" --address ":$[$start_port+2]" $args > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
-    disown $!
+    pid2=$!
+    disown $pid2
 
     "${MINIO[@]}" --address ":$[$start_port+3]" $args > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
-    disown $!
+    pid3=$!
+    disown $pid3
 
     sleep "$1"
-    if [ "$(pgrep -c minio)" -ne 3 ]; then
-        for i in $(seq 1 3); do
-            echo "server$i log:"
-            cat "${WORK_DIR}/dist-minio-server$i.log"
-        done
-        echo "FAILED"
-        purge "$WORK_DIR"
-        exit 1
+
+    if ! ps -p $pid1 1>&2 > /dev/null; then
+        echo "server1 log:"
+        cat "${WORK_DIR}/dist-minio-server1.log"
+        echo "FAILED"
+        purge "$WORK_DIR"
+        exit 1
+    fi
+
+    if ! ps -p $pid2 1>&2 > /dev/null; then
+        echo "server2 log:"
+        cat "${WORK_DIR}/dist-minio-server2.log"
+        echo "FAILED"
+        purge "$WORK_DIR"
+        exit 1
+    fi
+
+    if ! ps -p $pid3 1>&2 > /dev/null; then
+        echo "server3 log:"
+        cat "${WORK_DIR}/dist-minio-server3.log"
+        echo "FAILED"
+        purge "$WORK_DIR"
+        exit 1
     fi
 
     if ! pkill minio; then
         for i in $(seq 1 3); do
             echo "server$i log:"
             cat "${WORK_DIR}/dist-minio-server$i.log"
         done
         echo "FAILED"
         purge "$WORK_DIR"
         exit 1
     fi
 
     sleep 1;
     if pgrep minio; then
         # forcibly killing, to proceed further properly.
         if ! pkill -9 minio; then
             echo "no minio process running anymore, proceed."
         fi
     fi
 }
 
 function check_online() {
     if grep -q 'Unable to initialize sub-systems' ${WORK_DIR}/dist-minio-*.log; then
         echo "1"
     fi
 }

@@ -85,33 +105,36 @@ function __init__()
 }
 
 function perform_test() {
-    start_minio_3_node 120
+    start_minio_3_node 120 $2
 
     echo "Testing Distributed Erasure setup healing of drives"
     echo "Remove the contents of the disks belonging to '${1}' erasure set"
 
     rm -rf ${WORK_DIR}/${1}/*/
 
-    start_minio_3_node 120
+    start_minio_3_node 120 $2
 
     rv=$(check_online)
     if [ "$rv" == "1" ]; then
         for i in $(seq 1 3); do
             echo "server$i log:"
             cat "${WORK_DIR}/dist-minio-server$i.log"
         done
         pkill -9 minio
         echo "FAILED"
         purge "$WORK_DIR"
         exit 1
     fi
 }
 
 function main()
 {
-    perform_test "2"
-    perform_test "1"
-    perform_test "3"
+    # use same ports for all tests
+    start_port=$(shuf -i 10000-65000 -n 1)
+
+    perform_test "2" ${start_port}
+    perform_test "1" ${start_port}
+    perform_test "3" ${start_port}
 }
 
 ( __init__ "$@" && main "$@" )
```
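In the bucket and object ACL handlers, the trailing `w.(http.Flusher).Flush()` calls go away; a plausible reading of these hunks is that each of the four handlers now simply returns after writing its response, since the Go HTTP stack flushes on handler return: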
```diff
@@ -114,8 +114,6 @@ func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.
 		writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
 		return
 	}
-
-	w.(http.Flusher).Flush()
 }
 
 // GetBucketACLHandler - GET Bucket ACL
@@ -164,8 +162,6 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
-
-	w.(http.Flusher).Flush()
 }
 
 // PutObjectACLHandler - PUT Object ACL
@@ -229,8 +225,6 @@ func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.
 		writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
 		return
 	}
-
-	w.(http.Flusher).Flush()
 }
 
 // GetObjectACLHandler - GET Object ACL
@@ -283,6 +277,4 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
-
-	w.(http.Flusher).Flush()
 }
```
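The bucket quota and remote-target admin handlers gain a context argument on `globalBucketMetadataSys.Update` and `GetQuotaConfig`, reject the deprecated `fifo` quota type, and invoke the site-replication hook after persisting quota config: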
```diff
@@ -65,12 +65,33 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
 		return
 	}
 
-	if _, err = parseBucketQuota(bucket, data); err != nil {
+	quotaConfig, err := parseBucketQuota(bucket, data)
+	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
 
-	if err = globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, data); err != nil {
+	if quotaConfig.Type == "fifo" {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
+		return
+	}
+
+	if err = globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, data); err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
 
+	bucketMeta := madmin.SRBucketMeta{
+		Type:   madmin.SRBucketMetaTypeQuotaConfig,
+		Bucket: bucket,
+		Quota:  data,
+	}
+	if quotaConfig.Quota == 0 {
+		bucketMeta.Quota = nil
+	}
+
+	// Call site replication hook.
+	if err = globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta); err != nil {
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
+		return
+	}
@@ -99,7 +120,7 @@ func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *
 		return
 	}
 
-	config, err := globalBucketMetadataSys.GetQuotaConfig(bucket)
+	config, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket)
 	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
@@ -155,7 +176,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
 		return
 	}
 	var target madmin.BucketTarget
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	if err = json.Unmarshal(reqBytes, &target); err != nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
 		return
@@ -230,7 +251,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
 		return
 	}
-	if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
+	if err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
@@ -324,7 +345,7 @@ func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *ht
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
 		return
 	}
-	if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
+	if err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
```
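`validateAdminReq` now accepts a variadic list of admin actions and authorizes the request if any of them passes, and `toAdminAPIErr` learns several new error translations (site replication, decommission, IAM service accounts, tiering):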
```diff
@@ -29,7 +29,7 @@ import (
 	iampolicy "github.com/minio/pkg/iam/policy"
 )
 
-func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, action iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
+func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, actions ...iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
 	// Get current object layer instance.
 	objectAPI := newObjectLayerFn()
 	if objectAPI == nil || globalNotificationSys == nil {
@@ -37,14 +37,17 @@ func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Reques
 		return nil, auth.Credentials{}
 	}
 
-	// Validate request signature.
-	cred, adminAPIErr := checkAdminRequestAuth(ctx, r, action, "")
-	if adminAPIErr != ErrNone {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
-		return nil, cred
+	for _, action := range actions {
+		// Validate request signature.
+		cred, adminAPIErr := checkAdminRequestAuth(ctx, r, action, "")
+		if adminAPIErr != ErrNone {
+			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
+			return nil, cred
+		}
+		return objectAPI, cred
 	}
 
-	return objectAPI, cred
+	writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
+	return nil, auth.Credentials{}
 }
 
 // AdminError - is a generic error for all admin APIs.
@@ -83,8 +86,28 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
 			Description:    e.Message,
 			HTTPStatusCode: e.StatusCode,
 		}
+	case SRError:
+		apiErr = errorCodes.ToAPIErrWithErr(e.Code, e.Cause)
+	case decomError:
+		apiErr = APIError{
+			Code:           "XMinioDecommissionNotAllowed",
+			Description:    e.Err,
+			HTTPStatusCode: http.StatusBadRequest,
+		}
 	default:
 		switch {
+		case errors.Is(err, errDecommissionAlreadyRunning):
+			apiErr = APIError{
+				Code:           "XMinioDecommissionNotAllowed",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusBadRequest,
+			}
+		case errors.Is(err, errDecommissionComplete):
+			apiErr = APIError{
+				Code:           "XMinioDecommissionNotAllowed",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusBadRequest,
+			}
 		case errors.Is(err, errConfigNotFound):
 			apiErr = APIError{
 				Code: "XMinioConfigError",
@@ -97,12 +120,30 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
 				Description:    err.Error(),
 				HTTPStatusCode: http.StatusForbidden,
 			}
+		case errors.Is(err, errIAMServiceAccount):
+			apiErr = APIError{
+				Code:           "XMinioIAMServiceAccount",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusBadRequest,
+			}
+		case errors.Is(err, errIAMServiceAccountUsed):
+			apiErr = APIError{
+				Code:           "XMinioIAMServiceAccountUsed",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusBadRequest,
+			}
+		case errors.Is(err, errIAMNotInitialized):
+			apiErr = APIError{
+				Code:           "XMinioIAMNotInitialized",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusServiceUnavailable,
+			}
 		case errors.Is(err, errPolicyInUse):
 			apiErr = APIError{
 				Code:           "XMinioAdminPolicyInUse",
 				Description:    "The policy cannot be removed, as it is in use",
 				HTTPStatusCode: http.StatusBadRequest,
 			}
 		case errors.Is(err, kes.ErrKeyExists):
 			apiErr = APIError{
 				Code: "XMinioKMSKeyExists",
@@ -141,6 +182,12 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
 				Description:    err.Error(),
 				HTTPStatusCode: http.StatusConflict,
 			}
+		case errors.Is(err, errTierBackendNotEmpty):
+			apiErr = APIError{
+				Code:           "XMinioAdminTierBackendNotEmpty",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusBadRequest,
+			}
 		case errors.Is(err, errTierInsufficientCreds):
 			apiErr = APIError{
 				Code: "XMinioAdminTierInsufficientCreds",
```
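The config KV handlers now resolve the target sub-system via `config.GetSubSys`, validate only that sub-system, and share a new `applyDynamic` helper for applying dynamic configuration and signaling a service reload: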
@@ -19,6 +19,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
@@ -63,6 +64,12 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
|
||||
return
|
||||
}
|
||||
|
||||
subSys, _, _, err := config.GetSubSys(string(kvBytes))
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
cfg, err := readServerConfig(ctx, objectAPI)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
@@ -73,11 +80,33 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if err = validateConfig(cfg, subSys); err != nil {
|
||||
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
dynamic := config.SubSystemsDynamic.Contains(subSys)
|
||||
if dynamic {
|
||||
applyDynamic(ctx, objectAPI, cfg, subSys, r, w)
|
||||
}
|
||||
}
|
||||
|
||||
func applyDynamic(ctx context.Context, objectAPI ObjectLayer, cfg config.Config, subSys string,
|
||||
r *http.Request, w http.ResponseWriter,
|
||||
) {
|
||||
// Apply dynamic values.
|
||||
if err := applyDynamicConfigForSubSys(GlobalContext, objectAPI, cfg, subSys); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
globalNotificationSys.SignalService(serviceReloadDynamic)
|
||||
// Tell the client that dynamic config was applied.
|
||||
w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
|
||||
}
|
||||
|
||||
// SetConfigKVHandler - PUT /minio/admin/v3/set-config-kv
@@ -117,7 +146,13 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
		return
	}

	if err = validateConfig(cfg); err != nil {
	subSys, _, _, err := config.GetSubSys(string(kvBytes))
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err = validateConfig(cfg, subSys); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}
@@ -135,14 +170,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
	}

	if dynamic {
		// Apply dynamic values.
		if err := applyDynamicConfig(GlobalContext, objectAPI, cfg); err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		globalNotificationSys.SignalService(serviceReloadDynamic)
		// If all values were dynamic, tell the client.
		w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
		applyDynamic(ctx, objectAPI, cfg, subSys, r, w)
	}
	writeSuccessResponseHeadersOnly(w)
}
@@ -160,7 +188,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ

	cfg := globalServerConfig.Clone()
	vars := mux.Vars(r)
	var buf = &bytes.Buffer{}
	buf := &bytes.Buffer{}
	cw := config.NewConfigWriteTo(cfg, vars["key"])
	if _, err := cw.WriteTo(buf); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -205,11 +233,9 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *
			return
		}
	}
	} else {
		if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	} else if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

@@ -248,7 +274,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
		return
	}

	if err = validateConfig(cfg); err != nil {
	if err = validateConfig(cfg, ""); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}
@@ -326,7 +352,6 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req
	}

	json.NewEncoder(w).Encode(rd)
	w.(http.Flusher).Flush()
}

// SetConfigHandler - PUT /minio/admin/v3/config
@@ -360,7 +385,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
		return
	}

	if err = validateConfig(cfg); err != nil {
	if err = validateConfig(cfg, ""); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}

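// Aside (not MinIO source): config.GetSubSys extracts the subsystem from the
// incoming "subsys[:target] key=value" payload so validateConfig can be scoped
// to it. A simplified sketch of that parsing step, assuming this grammar:
package main

import (
	"fmt"
	"strings"
)

func getSubSys(kv string) (string, error) {
	fields := strings.Fields(kv)
	if len(fields) == 0 {
		return "", fmt.Errorf("empty config input")
	}
	// The first token is "subsys" or "subsys:target".
	subSys, _, _ := strings.Cut(fields[0], ":")
	return subSys, nil
}

func main() {
	subSys, _ := getSubSys("api requests_max=1600 requests_deadline=10s")
	fmt.Println(subSys) // api
}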
199
cmd/admin-handlers-pools.go
Normal file
@@ -0,0 +1,199 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"encoding/json"
	"net/http"

	"github.com/gorilla/mux"
	"github.com/minio/minio/internal/logger"
	iampolicy "github.com/minio/pkg/iam/policy"
)

func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "StartDecommission")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DecommissionAdminAction)
	if objectAPI == nil {
		return
	}

	// Legacy args style such as non-ellipses style is not supported with this API.
	if globalEndpoints.Legacy() {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	pools, ok := objectAPI.(*erasureServerPools)
	if !ok {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	vars := mux.Vars(r)
	v := vars["pool"]

	idx := globalEndpoints.GetPoolIdx(v)
	if idx == -1 {
		// We didn't find any matching pools, invalid input
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
		return
	}

	if ep := globalEndpoints[idx].Endpoints[0]; !ep.IsLocal {
		for nodeIdx, proxyEp := range globalProxyEndpoints {
			if proxyEp.Endpoint.Host == ep.Host {
				if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
					return
				}
			}
		}
	}

	if err := pools.Decommission(r.Context(), idx); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "CancelDecommission")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DecommissionAdminAction)
	if objectAPI == nil {
		return
	}

	// Legacy args style such as non-ellipses style is not supported with this API.
	if globalEndpoints.Legacy() {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	pools, ok := objectAPI.(*erasureServerPools)
	if !ok {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	vars := mux.Vars(r)
	v := vars["pool"]

	idx := globalEndpoints.GetPoolIdx(v)
	if idx == -1 {
		// We didn't find any matching pools, invalid input
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
		return
	}

	if ep := globalEndpoints[idx].Endpoints[0]; !ep.IsLocal {
		for nodeIdx, proxyEp := range globalProxyEndpoints {
			if proxyEp.Endpoint.Host == ep.Host {
				if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
					return
				}
			}
		}
	}

	if err := pools.DecommissionCancel(ctx, idx); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "StatusPool")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerInfoAdminAction, iampolicy.DecommissionAdminAction)
	if objectAPI == nil {
		return
	}

	// Legacy args style such as non-ellipses style is not supported with this API.
	if globalEndpoints.Legacy() {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	pools, ok := objectAPI.(*erasureServerPools)
	if !ok {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	vars := mux.Vars(r)
	v := vars["pool"]

	idx := globalEndpoints.GetPoolIdx(v)
	if idx == -1 {
		// We didn't find any matching pools, invalid input
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
		return
	}

	status, err := pools.Status(r.Context(), idx)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	logger.LogIf(r.Context(), json.NewEncoder(w).Encode(&status))
}

func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListPools")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerInfoAdminAction, iampolicy.DecommissionAdminAction)
	if objectAPI == nil {
		return
	}

	// Legacy args style such as non-ellipses style is not supported with this API.
	if globalEndpoints.Legacy() {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	pools, ok := objectAPI.(*erasureServerPools)
	if !ok {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	poolsStatus := make([]PoolStatus, len(globalEndpoints))
	for idx := range globalEndpoints {
		status, err := pools.Status(r.Context(), idx)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		poolsStatus[idx] = status
	}

	logger.LogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus))
}
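// Aside (not MinIO source): the pool handlers above resolve the pool from a
// query parameter. A runnable sketch of how such routes could be wired with
// gorilla/mux; the path and prefix here are assumptions for illustration:
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func startDecommission(w http.ResponseWriter, r *http.Request) {
	// Query-matched variables are available via mux.Vars, as in the
	// handlers above.
	fmt.Fprintf(w, "would decommission pool %q\n", mux.Vars(r)["pool"])
}

func main() {
	r := mux.NewRouter()
	admin := r.PathPrefix("/minio/admin/v3").Subrouter()
	admin.Methods(http.MethodPost).
		Path("/pools/decommission").
		Queries("pool", "{pool:.*}").
		HandlerFunc(startDecommission)
	log.Fatal(http.ListenAndServe(":8080", r))
}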
@@ -45,16 +45,15 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
	}

	var sites []madmin.PeerSite
	errCode := readJSONBody(ctx, r.Body, &sites, cred.SecretKey)
	if errCode != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
	if err := parseJSONBody(ctx, r.Body, &sites, cred.SecretKey); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	status, errInfo := globalSiteReplicationSys.AddPeerClusters(ctx, sites)
	if errInfo.Code != ErrNone {
		logger.LogIf(ctx, errInfo)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(errInfo.Code, errInfo.Cause), r.URL)
	status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

@@ -67,12 +66,12 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
	writeSuccessResponseJSON(w, body)
}

// SRInternalJoin - PUT /minio/admin/v3/site-replication/join
// SRPeerJoin - PUT /minio/admin/v3/site-replication/join
//
// used internally to tell current cluster to enable SR with
// the provided peer clusters and service account.
func (a adminAPIHandlers) SRInternalJoin(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRInternalJoin")
func (a adminAPIHandlers) SRPeerJoin(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerJoin")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

@@ -81,24 +80,22 @@ func (a adminAPIHandlers) SRInternalJoin(w http.ResponseWriter, r *http.Request)
		return
	}

	var joinArg madmin.SRInternalJoinReq
	errCode := readJSONBody(ctx, r.Body, &joinArg, cred.SecretKey)
	if errCode != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
	var joinArg madmin.SRPeerJoinReq
	if err := parseJSONBody(ctx, r.Body, &joinArg, cred.SecretKey); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	errInfo := globalSiteReplicationSys.InternalJoinReq(ctx, joinArg)
	if errInfo.Code != ErrNone {
		logger.LogIf(ctx, errInfo)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(errInfo.Code, errInfo.Cause), r.URL)
	if err := globalSiteReplicationSys.PeerJoinReq(ctx, joinArg); err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

// SRInternalBucketOps - PUT /minio/admin/v3/site-replication/bucket-ops?bucket=x&operation=y
func (a adminAPIHandlers) SRInternalBucketOps(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRInternalBucketOps")
// SRPeerBucketOps - PUT /minio/admin/v3/site-replication/bucket-ops?bucket=x&operation=y
func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerBucketOps")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

@@ -113,13 +110,17 @@ func (a adminAPIHandlers) SRInternalBucketOps(w http.ResponseWriter, r *http.Req

	var err error
	switch operation {
	default:
		err = errSRInvalidRequest(errInvalidArgument)
	case madmin.MakeWithVersioningBktOp:
		_, isLockEnabled := r.Form["lockEnabled"]
		_, isVersioningEnabled := r.Form["versioningEnabled"]
		_, isForceCreate := r.Form["forceCreate"]
		opts := BucketOptions{
			Location:          r.Form.Get("location"),
			LockEnabled:       isLockEnabled,
			VersioningEnabled: isVersioningEnabled,
			ForceCreate:       isForceCreate,
		}
		err = globalSiteReplicationSys.PeerBucketMakeWithVersioningHandler(ctx, bucket, opts)
	case madmin.ConfigureReplBktOp:
@@ -128,21 +129,17 @@ func (a adminAPIHandlers) SRInternalBucketOps(w http.ResponseWriter, r *http.Req
		err = globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, false)
	case madmin.ForceDeleteBucketBktOp:
		err = globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, true)
	default:
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
		return
	}
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

}

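// Aside (not MinIO source): the lockEnabled/versioningEnabled/forceCreate
// handling above keys off the *presence* of a form field, not its value.
// A stdlib-only sketch of that pattern:
package main

import (
	"fmt"
	"net/url"
)

func main() {
	q, _ := url.ParseQuery("lockEnabled&location=us-east-1")
	_, lockEnabled := q["lockEnabled"]             // present, even with no value
	_, versioningEnabled := q["versioningEnabled"] // absent
	fmt.Println(lockEnabled, versioningEnabled, q.Get("location"))
	// Output: true false us-east-1
}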
// SRInternalReplicateIAMItem - PUT /minio/admin/v3/site-replication/iam-item
func (a adminAPIHandlers) SRInternalReplicateIAMItem(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRInternalReplicateIAMItem")
// SRPeerReplicateIAMItem - PUT /minio/admin/v3/site-replication/iam-item
func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerReplicateIAMItem")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

@@ -152,45 +149,51 @@ func (a adminAPIHandlers) SRInternalReplicateIAMItem(w http.ResponseWriter, r *h
	}

	var item madmin.SRIAMItem
	errCode := readJSONBody(ctx, r.Body, &item, "")
	if errCode != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
	if err := parseJSONBody(ctx, r.Body, &item, ""); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	var err error
	switch item.Type {
	default:
		err = errSRInvalidRequest(errInvalidArgument)
	case madmin.SRIAMItemPolicy:
		var policy *iampolicy.Policy
		if len(item.Policy) > 0 {
			policy, err = iampolicy.ParseConfig(bytes.NewReader(item.Policy))
			if err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		if item.Policy == nil {
			err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, nil)
		} else {
			policy, perr := iampolicy.ParseConfig(bytes.NewReader(item.Policy))
			if perr != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, perr), r.URL)
				return
			}
			if policy.IsEmpty() {
				err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, nil)
			} else {
				err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, policy)
			}
		}
		err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, policy)
	case madmin.SRIAMItemSvcAcc:
		err = globalSiteReplicationSys.PeerSvcAccChangeHandler(ctx, *item.SvcAccChange)
		err = globalSiteReplicationSys.PeerSvcAccChangeHandler(ctx, item.SvcAccChange)
	case madmin.SRIAMItemPolicyMapping:
		err = globalSiteReplicationSys.PeerPolicyMappingHandler(ctx, *item.PolicyMapping)
		err = globalSiteReplicationSys.PeerPolicyMappingHandler(ctx, item.PolicyMapping)
	case madmin.SRIAMItemSTSAcc:
		err = globalSiteReplicationSys.PeerSTSAccHandler(ctx, *item.STSCredential)

	default:
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
		return
		err = globalSiteReplicationSys.PeerSTSAccHandler(ctx, item.STSCredential)
	case madmin.SRIAMItemIAMUser:
		err = globalSiteReplicationSys.PeerIAMUserChangeHandler(ctx, item.IAMUser)
	case madmin.SRIAMItemGroupInfo:
		err = globalSiteReplicationSys.PeerGroupInfoChangeHandler(ctx, item.GroupInfo)
	}
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

// SRInternalReplicateBucketItem - PUT /minio/admin/v3/site-replication/bucket-meta
func (a adminAPIHandlers) SRInternalReplicateBucketItem(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRInternalReplicateIAMItem")
// SRPeerReplicateBucketItem - PUT /minio/admin/v3/site-replication/bucket-meta
func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerReplicateBucketItem")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

@@ -200,50 +203,54 @@ func (a adminAPIHandlers) SRInternalReplicateBucketItem(w http.ResponseWriter, r
	}

	var item madmin.SRBucketMeta
	errCode := readJSONBody(ctx, r.Body, &item, "")
	if errCode != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
	if err := parseJSONBody(ctx, r.Body, &item, ""); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	var err error
	switch item.Type {
	default:
		err = errSRInvalidRequest(errInvalidArgument)
	case madmin.SRBucketMetaTypePolicy:
		var bktPolicy *policy.Policy
		if len(item.Policy) > 0 {
			bktPolicy, err = policy.ParseConfig(bytes.NewReader(item.Policy), item.Bucket)
		if item.Policy == nil {
			err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, nil)
		} else {
			bktPolicy, berr := policy.ParseConfig(bytes.NewReader(item.Policy), item.Bucket)
			if berr != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, berr), r.URL)
				return
			}
			if bktPolicy.IsEmpty() {
				err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, nil)
			} else {
				err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, bktPolicy)
			}
		}
	case madmin.SRBucketMetaTypeQuotaConfig:
		if item.Quota == nil {
			err = globalSiteReplicationSys.PeerBucketQuotaConfigHandler(ctx, item.Bucket, nil)
		} else {
			quotaConfig, err := parseBucketQuota(item.Bucket, item.Quota)
			if err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
				return
			}
			if err = globalSiteReplicationSys.PeerBucketQuotaConfigHandler(ctx, item.Bucket, quotaConfig); err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
		}
		err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, bktPolicy)
	case madmin.SRBucketMetaTypeTags:
		err = globalSiteReplicationSys.PeerBucketTaggingHandler(ctx, item.Bucket, item.Tags)
	case madmin.SRBucketMetaTypeObjectLockConfig:
		err = globalSiteReplicationSys.PeerBucketObjectLockConfigHandler(ctx, item.Bucket, item.ObjectLockConfig)
	case madmin.SRBucketMetaTypeSSEConfig:
		err = globalSiteReplicationSys.PeerBucketSSEConfigHandler(ctx, item.Bucket, item.SSEConfig)

	default:
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
		return
	}
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
		return
	}
}

// SiteReplicationDisable - PUT /minio/admin/v3/site-replication/disable
func (a adminAPIHandlers) SiteReplicationDisable(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationDisable")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationDisableAction)
	if objectAPI == nil {
		return
	}
}
@@ -269,11 +276,9 @@ func (a adminAPIHandlers) SiteReplicationInfo(w http.ResponseWriter, r *http.Req
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	w.(http.Flusher).Flush()
}

func (a adminAPIHandlers) SRInternalGetIDPSettings(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationGetIDPSettings")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
@@ -288,28 +293,202 @@ func (a adminAPIHandlers) SRInternalGetIDPSettings(w http.ResponseWriter, r *htt
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	w.(http.Flusher).Flush()
}

func readJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) APIErrorCode {
func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) error {
	data, err := ioutil.ReadAll(body)
	if err != nil {
		return ErrInvalidRequest
		return SRError{
			Cause: err,
			Code:  ErrSiteReplicationInvalidRequest,
		}
	}

	if encryptionKey != "" {
		data, err = madmin.DecryptData(encryptionKey, bytes.NewReader(data))
		if err != nil {
			logger.LogIf(ctx, err)
			return ErrInvalidRequest
			return SRError{
				Cause: err,
				Code:  ErrSiteReplicationInvalidRequest,
			}
		}
	}
	return json.Unmarshal(data, v)
}

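// Aside (not MinIO source): a stdlib-only analogue of parseJSONBody without
// the optional madmin.DecryptData step, to show the read-then-unmarshal shape
// and how a caller consumes it:
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

func decodeJSONBody(body io.Reader, v interface{}) error {
	data, err := io.ReadAll(body)
	if err != nil {
		return fmt.Errorf("reading body: %w", err)
	}
	return json.Unmarshal(data, v)
}

func main() {
	var site struct {
		Name     string `json:"name"`
		Endpoint string `json:"endpoint"`
	}
	body := strings.NewReader(`{"name":"site-a","endpoint":"https://minio-a:9000"}`)
	if err := decodeJSONBody(body, &site); err != nil {
		panic(err)
	}
	fmt.Println(site.Name, site.Endpoint) // site-a https://minio-a:9000
}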
	err = json.Unmarshal(data, v)
// SiteReplicationStatus - GET /minio/admin/v3/site-replication/status
func (a adminAPIHandlers) SiteReplicationStatus(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationStatus")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationInfoAction)
	if objectAPI == nil {
		return
	}
	opts := getSRStatusOptions(r)
	// default options to all if status options are unset for backward compatibility
	var dfltOpts madmin.SRStatusOptions
	if opts == dfltOpts {
		opts.Buckets = true
		opts.Users = true
		opts.Policies = true
		opts.Groups = true
	}
	info, err := globalSiteReplicationSys.SiteReplicationStatus(ctx, objectAPI, opts)
	if err != nil {
		return ErrAdminConfigBadJSON
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	return ErrNone
	if err = json.NewEncoder(w).Encode(info); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

// SiteReplicationMetaInfo - GET /minio/admin/v3/site-replication/metainfo
func (a adminAPIHandlers) SiteReplicationMetaInfo(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationMetaInfo")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationInfoAction)
	if objectAPI == nil {
		return
	}

	opts := getSRStatusOptions(r)
	info, err := globalSiteReplicationSys.SiteReplicationMetaInfo(ctx, objectAPI, opts)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err = json.NewEncoder(w).Encode(info); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

// SiteReplicationEdit - PUT /minio/admin/v3/site-replication/edit
func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationEdit")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
	if objectAPI == nil {
		return
	}
	var site madmin.PeerInfo
	err := parseJSONBody(ctx, r.Body, &site, cred.SecretKey)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	body, err := json.Marshal(status)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, body)
}

// SRPeerEdit - PUT /minio/admin/v3/site-replication/peer/edit
//
// used internally to tell current cluster to update endpoint for peer
func (a adminAPIHandlers) SRPeerEdit(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerEdit")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
	if objectAPI == nil {
		return
	}

	var pi madmin.PeerInfo
	if err := parseJSONBody(ctx, r.Body, &pi, ""); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err := globalSiteReplicationSys.PeerEditReq(ctx, pi); err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

func getSRStatusOptions(r *http.Request) (opts madmin.SRStatusOptions) {
	q := r.Form
	opts.Buckets = q.Get("buckets") == "true"
	opts.Policies = q.Get("policies") == "true"
	opts.Groups = q.Get("groups") == "true"
	opts.Users = q.Get("users") == "true"
	opts.Entity = madmin.GetSREntityType(q.Get("entity"))
	opts.EntityValue = q.Get("entityvalue")
	return
}

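// Aside (not MinIO source): the query shape getSRStatusOptions expects —
// only the literal string "true" enables a section:
package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("buckets", "true")
	q.Set("entity", "bucket")
	q.Set("entityvalue", "photos")
	fmt.Println("/minio/admin/v3/site-replication/status?" + q.Encode())
}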
// SiteReplicationRemove - PUT /minio/admin/v3/site-replication/remove
func (a adminAPIHandlers) SiteReplicationRemove(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationRemove")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationRemoveAction)
	if objectAPI == nil {
		return
	}
	var rreq madmin.SRRemoveReq
	err := parseJSONBody(ctx, r.Body, &rreq, "")
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	status, err := globalSiteReplicationSys.RemovePeerCluster(ctx, objectAPI, rreq)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	body, err := json.Marshal(status)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	writeSuccessResponseJSON(w, body)
}

// SRPeerRemove - PUT /minio/admin/v3/site-replication/peer/remove
//
// used internally to tell current cluster to remove a peer
func (a adminAPIHandlers) SRPeerRemove(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerRemove")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationRemoveAction)
	if objectAPI == nil {
		return
	}

	var req madmin.SRRemoveReq
	if err := parseJSONBody(ctx, r.Body, &req, ""); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err := globalSiteReplicationSys.InternalRemoveReq(ctx, objectAPI, req); err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

141
cmd/admin-handlers-users-race_test.go
Normal file
@@ -0,0 +1,141 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

//go:build !race
// +build !race

// Tests in this file are not run under the `-race` flag as they are too slow
// and cause context deadline errors.

package cmd

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/minio/madmin-go"
	minio "github.com/minio/minio-go/v7"
)

func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) {
	suite.SetUpSuite(c)
	suite.TestDeleteUserRace(c)
	suite.TearDownSuite(c)
}

func TestIAMInternalIDPConcurrencyServerSuite(t *testing.T) {
	baseTestCases := []TestSuiteCommon{
		// Init and run test on FS backend with signature v4.
		{serverType: "FS", signer: signerV4},
		// Init and run test on FS backend, with tls enabled.
		{serverType: "FS", signer: signerV4, secure: true},
		// Init and run test on Erasure backend.
		{serverType: "Erasure", signer: signerV4},
		// Init and run test on ErasureSet backend.
		{serverType: "ErasureSet", signer: signerV4},
	}
	testCases := []*TestSuiteIAM{}
	for _, bt := range baseTestCases {
		testCases = append(testCases,
			newTestSuiteIAM(bt, false),
			newTestSuiteIAM(bt, true),
		)
	}
	for i, testCase := range testCases {
		etcdStr := ""
		if testCase.withEtcdBackend {
			etcdStr = " (with etcd backend)"
		}
		t.Run(
			fmt.Sprintf("Test: %d, ServerType: %s%s", i+1, testCase.serverType, etcdStr),
			func(t *testing.T) {
				runAllIAMConcurrencyTests(testCase, &check{t, testCase.serverType})
			},
		)
	}
}

func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
	ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second)
	defer cancel()

	bucket := getRandomBucketName()
	err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
	if err != nil {
		c.Fatalf("bucket create error: %v", err)
	}

	// Create a policy.
	policy := "mypolicy"
	policyBytes := []byte(fmt.Sprintf(`{
 "Version": "2012-10-17",
 "Statement": [
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject",
    "s3:ListBucket"
   ],
   "Resource": [
    "arn:aws:s3:::%s/*"
   ]
  }
 ]
}`, bucket))
	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
	if err != nil {
		c.Fatalf("policy add error: %v", err)
	}

	userCount := 50
	accessKeys := make([]string, userCount)
	secretKeys := make([]string, userCount)
	for i := 0; i < userCount; i++ {
		accessKey, secretKey := mustGenerateCredentials(c)
		err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
		if err != nil {
			c.Fatalf("Unable to set user: %v", err)
		}

		err = s.adm.SetPolicy(ctx, policy, accessKey, false)
		if err != nil {
			c.Fatalf("Unable to set policy: %v", err)
		}

		accessKeys[i] = accessKey
		secretKeys[i] = secretKey
	}

	wg := sync.WaitGroup{}
	for i := 0; i < userCount; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			uClient := s.getUserClient(c, accessKeys[i], secretKeys[i], "")
			err := s.adm.RemoveUser(ctx, accessKeys[i])
			if err != nil {
				c.Fatalf("unable to remove user: %v", err)
			}
			c.mustNotListObjects(ctx, uClient, bucket)
		}(i)
	}
	wg.Wait()
}
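// Aside (not MinIO source): TestDeleteUserRace above calls c.Fatalf from
// worker goroutines; the standard testing package documents that
// FailNow/Fatalf must run on the test goroutine, so collecting errors over a
// channel is the safer shape of the same fan-out. A minimal sketch with a
// stubbed removeUser:
package main

import (
	"fmt"
	"sync"
)

func removeUser(i int) error {
	// Stand-in for s.adm.RemoveUser plus the follow-up access check.
	return nil
}

func main() {
	const userCount = 5
	errs := make(chan error, userCount)
	var wg sync.WaitGroup
	for i := 0; i < userCount; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if err := removeUser(i); err != nil {
				errs <- fmt.Errorf("user %d: %w", i, err)
			}
		}(i)
	}
	wg.Wait()
	close(errs)
	for err := range errs {
		fmt.Println(err) // report after all workers finish
	}
}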
@@ -58,21 +58,24 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
		return
	}

	if err := globalIAMSys.DeleteUser(accessKey); err != nil {
	if err := globalIAMSys.DeleteUser(ctx, accessKey, true); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Notify all other MinIO peers to delete user.
	for _, nerr := range globalNotificationSys.DeleteUser(accessKey) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
		Type: madmin.SRIAMItemIAMUser,
		IAMUser: &madmin.SRIAMUser{
			AccessKey:   accessKey,
			IsDeleteReq: true,
		},
	}); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

// ListUsers - GET /minio/admin/v3/list-users?bucket={bucket}
// ListBucketUsers - GET /minio/admin/v3/list-users?bucket={bucket}
func (a adminAPIHandlers) ListBucketUsers(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListBucketUsers")

@@ -87,7 +90,7 @@ func (a adminAPIHandlers) ListBucketUsers(w http.ResponseWriter, r *http.Request

	password := cred.SecretKey

	allCredentials, err := globalIAMSys.ListBucketUsers(bucket)
	allCredentials, err := globalIAMSys.ListBucketUsers(ctx, bucket)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
@@ -121,12 +124,24 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {

	password := cred.SecretKey

	allCredentials, err := globalIAMSys.ListUsers()
	allCredentials, err := globalIAMSys.ListUsers(ctx)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Add ldap users which have mapped policies if in LDAP mode
	// FIXME(vadmeste): move this to policy info in the future
	ldapUsers, err := globalIAMSys.ListLDAPUsers()
	if err != nil && err != errIAMActionNotAllowed {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	for k, v := range ldapUsers {
		allCredentials[k] = v
	}

	// Marshal the response
	data, err := json.Marshal(allCredentials)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -164,34 +179,27 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
		return
	}

	accessKey := cred.ParentUser
	if accessKey == "" {
		accessKey = cred.AccessKey
	checkDenyOnly := false
	if name == cred.AccessKey {
		// Check that there is no explicit deny - otherwise it's allowed
		// to view one's own info.
		checkDenyOnly = true
	}

	// For temporary credentials, always use the temporary credentials
	// themselves to check policy, without implicit permissions.
	if cred.IsTemp() && cred.ParentUser == globalActiveCred.AccessKey {
		accessKey = cred.AccessKey
	if !globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName:     cred.AccessKey,
		Groups:          cred.Groups,
		Action:          iampolicy.GetUserAdminAction,
		ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
		IsOwner:         owner,
		Claims:          claims,
		DenyOnly:        checkDenyOnly,
	}) {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
		return
	}

	implicitPerm := name == accessKey
	if !implicitPerm {
		if !globalIAMSys.IsAllowed(iampolicy.Args{
			AccountName:     accessKey,
			Groups:          cred.Groups,
			Action:          iampolicy.GetUserAdminAction,
			ConditionValues: getConditionValues(r, "", accessKey, claims),
			IsOwner:         owner,
			Claims:          claims,
		}) {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
			return
		}
	}

	userInfo, err := globalIAMSys.GetUserInfo(name)
	userInfo, err := globalIAMSys.GetUserInfo(ctx, name)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
@@ -231,24 +239,23 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
	}

	if updReq.IsRemove {
		err = globalIAMSys.RemoveUsersFromGroup(updReq.Group, updReq.Members)
		err = globalIAMSys.RemoveUsersFromGroup(ctx, updReq.Group, updReq.Members)
	} else {
		err = globalIAMSys.AddUsersToGroup(updReq.Group, updReq.Members)
		err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members)
	}

	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Notify all other MinIO peers to load group.
	if !globalIAMSys.HasWatcher() {
		for _, nerr := range globalNotificationSys.LoadGroup(updReq.Group) {
			if nerr.Err != nil {
				logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
				logger.LogIf(ctx, nerr.Err)
			}
		}
	if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
		Type: madmin.SRIAMItemGroupInfo,
		GroupInfo: &madmin.SRGroupInfo{
			UpdateReq: updReq,
		},
	}); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

@@ -292,7 +299,7 @@ func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
		return
	}

	groups, err := globalIAMSys.ListGroups()
	groups, err := globalIAMSys.ListGroups(ctx)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
@@ -323,11 +330,12 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
	status := vars["status"]

	var err error
	if status == statusEnabled {
		err = globalIAMSys.SetGroupStatus(group, true)
	} else if status == statusDisabled {
		err = globalIAMSys.SetGroupStatus(group, false)
	} else {
	switch status {
	case statusEnabled:
		err = globalIAMSys.SetGroupStatus(ctx, group, true)
	case statusDisabled:
		err = globalIAMSys.SetGroupStatus(ctx, group, false)
	default:
		err = errInvalidArgument
	}
	if err != nil {
@@ -335,14 +343,18 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
		return
	}

	// Notify all other MinIO peers to reload user.
	if !globalIAMSys.HasWatcher() {
		for _, nerr := range globalNotificationSys.LoadGroup(group) {
			if nerr.Err != nil {
				logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
				logger.LogIf(ctx, nerr.Err)
			}
		}
	if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
		Type: madmin.SRIAMItemGroupInfo,
		GroupInfo: &madmin.SRGroupInfo{
			UpdateReq: madmin.GroupAddRemove{
				Group:    group,
				Status:   madmin.GroupStatus(status),
				IsRemove: false,
			},
		},
	}); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

@@ -361,25 +373,29 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
	accessKey := vars["accessKey"]
	status := vars["status"]

	// This API is not allowed to lookup accessKey user status
	// This API is not allowed to lookup master access key user status
	if accessKey == globalActiveCred.AccessKey {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}

	if err := globalIAMSys.SetUserStatus(accessKey, madmin.AccountStatus(status)); err != nil {
	if err := globalIAMSys.SetUserStatus(ctx, accessKey, madmin.AccountStatus(status)); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Notify all other MinIO peers to reload user.
	if !globalIAMSys.HasWatcher() {
		for _, nerr := range globalNotificationSys.LoadUser(accessKey, false) {
			if nerr.Err != nil {
				logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
				logger.LogIf(ctx, nerr.Err)
			}
		}
	if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
		Type: madmin.SRIAMItemIAMUser,
		IAMUser: &madmin.SRIAMUser{
			AccessKey:   accessKey,
			IsDeleteReq: false,
			UserReq: &madmin.AddOrUpdateUserReq{
				Status: madmin.AccountStatus(status),
			},
		},
	}); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

@@ -411,6 +427,14 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
		return
	}

	userCred, exists := globalIAMSys.GetUser(ctx, accessKey)
	if exists && (userCred.IsTemp() || userCred.IsServiceAccount()) {
		// Updating an STS credential is not allowed, and this API does
		// not support updating service accounts.
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
		return
	}

	if (cred.IsTemp() || cred.IsServiceAccount()) && cred.ParentUser == accessKey {
		// If the incoming access key matches the parent user, we should
		// reject password change requests.
@@ -418,39 +442,21 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
		return
	}

	implicitPerm := accessKey == cred.AccessKey
	if !implicitPerm {
		parentUser := cred.ParentUser
		if parentUser == "" {
			parentUser = cred.AccessKey
		}
		// For temporary credentials, always use the temporary credentials
		// themselves to check policy, without implicit permissions.
		if cred.IsTemp() && cred.ParentUser == globalActiveCred.AccessKey {
			parentUser = cred.AccessKey
		}
		if !globalIAMSys.IsAllowed(iampolicy.Args{
			AccountName:     parentUser,
			Groups:          cred.Groups,
			Action:          iampolicy.CreateUserAdminAction,
			ConditionValues: getConditionValues(r, "", parentUser, claims),
			IsOwner:         owner,
			Claims:          claims,
		}) {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
			return
		}
	checkDenyOnly := false
	if accessKey == cred.AccessKey {
		// Check that there is no explicit deny - otherwise it's allowed
		// to change one's own password.
		checkDenyOnly = true
	}

	if implicitPerm && !globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName:     accessKey,
	if !globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName:     cred.AccessKey,
		Groups:          cred.Groups,
		Action:          iampolicy.CreateUserAdminAction,
		ConditionValues: getConditionValues(r, "", accessKey, claims),
		ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
		IsOwner:         owner,
		Claims:          claims,
		DenyOnly:        true, // check if changing password is explicitly denied.
		DenyOnly:        checkDenyOnly,
	}) {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
		return
@@ -470,26 +476,28 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
		return
	}

	var uinfo madmin.UserInfo
	if err = json.Unmarshal(configBytes, &uinfo); err != nil {
	var ureq madmin.AddOrUpdateUserReq
	if err = json.Unmarshal(configBytes, &ureq); err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
		return
	}

	if err = globalIAMSys.CreateUser(accessKey, uinfo); err != nil {
	if err = globalIAMSys.CreateUser(ctx, accessKey, ureq); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Notify all other Minio peers to reload user
	if !globalIAMSys.HasWatcher() {
		for _, nerr := range globalNotificationSys.LoadUser(accessKey, false) {
			if nerr.Err != nil {
				logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
				logger.LogIf(ctx, nerr.Err)
			}
		}
	if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
		Type: madmin.SRIAMItemIAMUser,
		IAMUser: &madmin.SRIAMUser{
			AccessKey:   accessKey,
			IsDeleteReq: false,
			UserReq:     &ureq,
		},
	}); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

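// Aside (not MinIO source): the DenyOnly flag used in GetUserInfo and AddUser
// above flips the authorization question for self-targeted requests — instead
// of "is this explicitly allowed?" it asks "is this explicitly denied?".
// A simplified sketch with a stand-in policy evaluator:
package main

import "fmt"

type decision int

const (
	noMatch decision = iota
	allow
	deny
)

// evalPolicy stands in for globalIAMSys.IsAllowed's policy lookup.
func evalPolicy(account string) decision {
	if account == "admin" {
		return allow
	}
	return noMatch
}

func authorize(account, target string) bool {
	denyOnly := account == target // acting on one's own account
	d := evalPolicy(account)
	if denyOnly {
		return d != deny // implicitly allowed unless explicitly denied
	}
	return d == allow // otherwise an explicit allow is required
}

func main() {
	fmt.Println(authorize("alice", "alice")) // true: self, no explicit deny
	fmt.Println(authorize("alice", "bob"))   // false: needs an explicit allow
	fmt.Println(authorize("admin", "bob"))   // true: explicitly allowed
}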
@@ -564,6 +572,23 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
|
||||
// that targetUser is a real user (i.e. not derived
|
||||
// credentials).
|
||||
if isSvcAccForRequestor {
|
||||
// Check if adding service account is explicitly denied.
|
||||
//
|
||||
// This allows turning off service accounts for request sender,
|
||||
// if there is no deny statement this call is implicitly enabled.
|
||||
if !globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: requestorUser,
|
||||
Groups: requestorGroups,
|
||||
Action: iampolicy.CreateServiceAccountAdminAction,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
|
||||
IsOwner: owner,
|
||||
Claims: claims,
|
||||
DenyOnly: true,
|
||||
}) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if requestorIsDerivedCredential {
|
||||
if requestorParentUser == "" {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx,
|
||||
@@ -587,6 +612,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
|
||||
// user <> to the request sender
|
||||
if !globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: requestorUser,
|
||||
Groups: requestorGroups,
|
||||
Action: iampolicy.CreateServiceAccountAdminAction,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
|
||||
IsOwner: owner,
|
||||
@@ -630,24 +656,9 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
|
||||
return
|
||||
}
|
||||
|
||||
// Notify all other Minio peers to reload user the service account
|
||||
if !globalIAMSys.HasWatcher() {
|
||||
for _, nerr := range globalNotificationSys.LoadServiceAccount(newCred.AccessKey) {
|
||||
if nerr.Err != nil {
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Call hook for cluster-replication.
|
||||
//
|
||||
// FIXME: This wont work in an OpenID situation as the parent credential
|
||||
// may not be present on peer clusters to provide inherited policies.
|
||||
// Also, we should not be replicating root user's service account - as
|
||||
// they are not authenticated by a common external IDP, so we skip when
|
||||
// opts.ldapUser == "".
|
||||
if _, isLDAPAccount := opts.claims[ldapUserN]; isLDAPAccount {
|
||||
// Call hook for cluster-replication if the service account is not for a
|
||||
// root user.
|
||||
if newCred.ParentUser != globalActiveCred.AccessKey {
|
||||
err = globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
Type: madmin.SRIAMItemSvcAcc,
|
||||
SvcAccChange: &madmin.SRSvcAccChange{
|
||||
@@ -668,7 +679,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
}
|
||||
|
||||
var createResp = madmin.AddServiceAccountResp{
|
||||
createResp := madmin.AddServiceAccountResp{
|
||||
Credentials: madmin.Credentials{
|
||||
AccessKey: newCred.AccessKey,
|
||||
SecretKey: newCred.SecretKey,
|
||||
@@ -771,24 +782,8 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
|
||||
return
|
||||
}
|
||||
|
||||
// Notify all other Minio peers to reload user the service account
|
||||
if !globalIAMSys.HasWatcher() {
|
||||
for _, nerr := range globalNotificationSys.LoadServiceAccount(accessKey) {
|
||||
if nerr.Err != nil {
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Call site replication hook. Only LDAP accounts are supported for
|
||||
// replication operations.
|
||||
svcAccClaims, err := globalIAMSys.GetClaimsForSvcAcc(ctx, accessKey)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if _, isLDAPAccount := svcAccClaims[ldapUserN]; isLDAPAccount {
|
||||
// Call site replication hook - non-root user accounts are replicated.
|
||||
if svcAccount.ParentUser != globalActiveCred.AccessKey {
|
||||
err = globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
Type: madmin.SRIAMItemSvcAcc,
|
||||
SvcAccChange: &madmin.SRSvcAccChange{
|
||||
@@ -805,6 +800,7 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
writeSuccessNoContent(w)
|
||||
}
|
||||
|
||||
@@ -859,18 +855,15 @@ func (a adminAPIHandlers) InfoServiceAccount(w http.ResponseWriter, r *http.Requ
|
||||
|
||||
var svcAccountPolicy iampolicy.Policy
|
||||
|
||||
impliedPolicy := policy == nil
|
||||
|
||||
// If policy is empty, check for policy of the parent user
|
||||
if !impliedPolicy {
|
||||
svcAccountPolicy = svcAccountPolicy.Merge(*policy)
|
||||
if policy != nil {
|
||||
svcAccountPolicy = *policy
|
||||
} else {
|
||||
policiesNames, err := globalIAMSys.PolicyDBGet(svcAccount.ParentUser, false)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
svcAccountPolicy = svcAccountPolicy.Merge(globalIAMSys.GetCombinedPolicy(policiesNames...))
|
||||
svcAccountPolicy = globalIAMSys.GetCombinedPolicy(policiesNames...)
|
||||
}
|
||||
|
||||
policyJSON, err := json.MarshalIndent(svcAccountPolicy, "", " ")
|
||||
@@ -879,10 +872,10 @@ func (a adminAPIHandlers) InfoServiceAccount(w http.ResponseWriter, r *http.Requ
|
||||
return
|
||||
}
|
||||
|
||||
var infoResp = madmin.InfoServiceAccountResp{
|
||||
infoResp := madmin.InfoServiceAccountResp{
|
||||
ParentUser: svcAccount.ParentUser,
|
||||
AccountStatus: svcAccount.Status,
|
||||
ImpliedPolicy: impliedPolicy,
|
||||
ImpliedPolicy: policy == nil,
|
||||
Policy: string(policyJSON),
|
||||
}
|
||||
|
||||
@@ -922,8 +915,10 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
|
||||
|
||||
var targetAccount string
|
||||
|
||||
// If listing is requested for a specific user (who is not the request
|
||||
// sender), check that the user has permissions.
|
||||
user := r.Form.Get("user")
|
||||
if user != "" {
|
||||
if user != "" && user != cred.AccessKey {
|
||||
if !globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: iampolicy.ListServiceAccountsAdminAction,
|
||||
@@ -954,7 +949,7 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
|
||||
serviceAccountsNames = append(serviceAccountsNames, svc.AccessKey)
|
||||
}
|
||||
|
||||
var listResp = madmin.ListServiceAccountsResp{
|
||||
listResp := madmin.ListServiceAccountsResp{
|
||||
Accounts: serviceAccountsNames,
|
||||
}
|
||||
|
||||
@@ -1026,27 +1021,14 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
|
||||
}
|
||||
}
|
||||
|
||||
err = globalIAMSys.DeleteServiceAccount(ctx, serviceAccount)
|
||||
err = globalIAMSys.DeleteServiceAccount(ctx, serviceAccount, true)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
for _, nerr := range globalNotificationSys.DeleteServiceAccount(serviceAccount) {
|
||||
if nerr.Err != nil {
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
}
|
||||
}
|
||||
|
||||
// Call site replication hook. Only LDAP accounts are supported for
|
||||
// replication operations.
|
||||
svcAccClaims, err := globalIAMSys.GetClaimsForSvcAcc(ctx, serviceAccount)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if _, isLDAPAccount := svcAccClaims[ldapUserN]; isLDAPAccount {
|
||||
// Call site replication hook - non-root user accounts are replicated.
|
||||
if svcAccount.ParentUser != globalActiveCred.AccessKey {
|
||||
err = globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
Type: madmin.SRIAMItemSvcAcc,
|
||||
SvcAccChange: &madmin.SRSvcAccChange{
|
||||
@@ -1126,11 +1108,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
|
||||
var err error
|
||||
if !globalIsGateway {
|
||||
// Load the latest calculated data usage
|
||||
dataUsageInfo, err = loadDataUsageFromBackend(ctx, objectAPI)
|
||||
if err != nil {
|
||||
// log the error, continue with the accounting response
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
dataUsageInfo, _ = loadDataUsageFromBackend(ctx, objectAPI)
|
||||
}
|
||||
|
||||
// If etcd, dns federation configured list buckets from etcd.
|
||||
@@ -1161,19 +1139,11 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
|
||||
}
|
||||
|
||||
accountName := cred.AccessKey
|
||||
var policies []string
|
||||
switch globalIAMSys.usersSysType {
|
||||
case MinIOUsersSysType:
|
||||
policies, err = globalIAMSys.PolicyDBGet(accountName, false)
|
||||
case LDAPUsersSysType:
|
||||
parentUser := accountName
|
||||
if cred.ParentUser != "" {
|
||||
parentUser = cred.ParentUser
|
||||
}
|
||||
policies, err = globalIAMSys.PolicyDBGet(parentUser, false, cred.Groups...)
|
||||
default:
|
||||
err = errors.New("should never happen")
|
||||
if cred.IsTemp() || cred.IsServiceAccount() {
|
||||
// For derived credentials, check the parent user's permissions.
|
||||
accountName = cred.ParentUser
|
||||
}
|
||||
policies, err := globalIAMSys.PolicyDBGet(accountName, false, cred.Groups...)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
@@ -1207,17 +1177,13 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
// Fetch the prefix usage of the current bucket
var prefixUsage map[string]uint64
if enablePrefixUsage {
if pu, err := loadPrefixUsageFromBackend(ctx, objectAPI, bucket.Name); err == nil {
prefixUsage = pu
} else {
logger.LogIf(ctx, err)
}
prefixUsage, _ = loadPrefixUsageFromBackend(ctx, objectAPI, bucket.Name)
}

lcfg, _ := globalBucketObjectLockSys.Get(bucket.Name)
quota, _ := globalBucketQuotaSys.Get(bucket.Name)
rcfg, _ := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket.Name)
tcfg, _ := globalBucketMetadataSys.GetTaggingConfig(bucket.Name)
quota, _ := globalBucketQuotaSys.Get(ctx, bucket.Name)
rcfg, _, _ := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket.Name)
tcfg, _, _ := globalBucketMetadataSys.GetTaggingConfig(bucket.Name)

acctInfo.Buckets = append(acctInfo.Buckets, madmin.BucketAccessInfo{
Name: bucket.Name,
@@ -1252,6 +1218,16 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
}
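
(The account info assembled above, including the optional per-prefix usage, is consumed client-side roughly as in this sketch; it assumes the madmin-go AccountInfo call with an AccountOpts.PrefixUsage flag, which is not part of this diff.)

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/minio/madmin-go"
)

func main() {
    // Hypothetical endpoint and credentials, for illustration only.
    adm, err := madmin.New("localhost:9000", "minioadmin", "minioadmin", false)
    if err != nil {
        log.Fatal(err)
    }
    // PrefixUsage asks the server to include per-prefix usage, i.e. the
    // enablePrefixUsage branch in the handler above.
    ai, err := adm.AccountInfo(context.Background(), madmin.AccountOpts{PrefixUsage: true})
    if err != nil {
        log.Fatal(err)
    }
    for _, b := range ai.Buckets {
        fmt.Println(b.Name, b.Size)
    }
}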

// InfoCannedPolicy - GET /minio/admin/v3/info-canned-policy?name={policyName}
//
// Newer API response with policy timestamps is returned with query parameter
// `v=2` like:
//
// GET /minio/admin/v3/info-canned-policy?name={policyName}&v=2
//
// The newer API will eventually become the default (and only) one. The older
// response is to return only the policy JSON. The newer response returns
// timestamps along with the policy JSON. Both versions are supported for now,
// for smooth transition to new API.
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicy")

@@ -1262,13 +1238,36 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
return
}

policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
name := mux.Vars(r)["name"]
policies := newMappedPolicy(name).toSlice()
if len(policies) != 1 {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errTooManyPolicies), r.URL)
return
}

policyDoc, err := globalIAMSys.InfoPolicy(name)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

buf, err := json.MarshalIndent(policy, "", " ")
// Is the new API version being requested?
infoPolicyAPIVersion := r.Form.Get("v")
if infoPolicyAPIVersion == "2" {
buf, err := json.MarshalIndent(policyDoc, "", " ")
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.Write(buf)
return
} else if infoPolicyAPIVersion != "" {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errors.New("invalid version parameter 'v' supplied")), r.URL)
return
}

// Return the older API response value of just the policy json.
buf, err := json.MarshalIndent(policyDoc.Policy, "", " ")
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
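
(A client-side sketch of the version negotiation above; it assumes madmin-go grew a matching InfoCannedPolicyV2 helper that sends v=2, which this diff does not show.)

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/minio/madmin-go"
)

func main() {
    adm, err := madmin.New("localhost:9000", "minioadmin", "minioadmin", false)
    if err != nil {
        log.Fatal(err)
    }
    ctx := context.Background()

    // Old response shape: just the policy JSON document.
    raw, err := adm.InfoCannedPolicy(ctx, "readwrite")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(raw))

    // New response shape (v=2): policy JSON plus create/update timestamps.
    pi, err := adm.InfoCannedPolicyV2(ctx, "readwrite")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(pi.UpdateDate, string(pi.Policy))
}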
@@ -1288,13 +1287,13 @@ func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Requ
}

bucket := mux.Vars(r)["bucket"]
policies, err := globalIAMSys.ListPolicies(bucket)
policies, err := globalIAMSys.ListPolicies(ctx, bucket)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

var newPolicies = make(map[string]iampolicy.Policy)
newPolicies := make(map[string]iampolicy.Policy)
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {
@@ -1307,9 +1306,6 @@ func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Requ
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

w.(http.Flusher).Flush()

}

// ListCannedPolicies - GET /minio/admin/v3/list-canned-policies
@@ -1323,13 +1319,13 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
return
}

policies, err := globalIAMSys.ListPolicies("")
policies, err := globalIAMSys.ListPolicies(ctx, "")
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

var newPolicies = make(map[string]iampolicy.Policy)
newPolicies := make(map[string]iampolicy.Policy)
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {
@@ -1342,8 +1338,6 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

w.(http.Flusher).Flush()
}

// RemoveCannedPolicy - DELETE /minio/admin/v3/remove-canned-policy?name=<policy_name>
@@ -1360,19 +1354,11 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
vars := mux.Vars(r)
policyName := vars["name"]

if err := globalIAMSys.DeletePolicy(policyName); err != nil {
if err := globalIAMSys.DeletePolicy(ctx, policyName, true); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

// Notify all other MinIO peers to delete policy
for _, nerr := range globalNotificationSys.DeletePolicy(policyName) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}

// Call the cluster replication hook to replicate the policy deletion to
// other minio clusters.
if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
@@ -1428,21 +1414,11 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
return
}

if err = globalIAMSys.SetPolicy(policyName, *iamPolicy); err != nil {
if err = globalIAMSys.SetPolicy(ctx, policyName, *iamPolicy); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

// Notify all other MinIO peers to reload policy
if !globalIAMSys.HasWatcher() {
for _, nerr := range globalNotificationSys.LoadPolicy(policyName) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}

// Call cluster-replication policy creation hook to replicate policy to
// other minio clusters.
if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
@@ -1483,21 +1459,11 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
}
}

if err := globalIAMSys.PolicyDBSet(entityName, policyName, isGroup); err != nil {
if err := globalIAMSys.PolicyDBSet(ctx, entityName, policyName, isGroup); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

// Notify all other MinIO peers to reload policy
if !globalIAMSys.HasWatcher() {
for _, nerr := range globalNotificationSys.LoadPolicyMapping(entityName, isGroup) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}

if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicyMapping,
PolicyMapping: &madmin.SRPolicyMapping{

@@ -18,8 +18,16 @@
package cmd

import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"strings"
"testing"
"time"
@@ -27,30 +35,44 @@ import (
"github.com/minio/madmin-go"
minio "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
cr "github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/s3utils"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/signer"
"github.com/minio/minio/internal/auth"
)

const (
testDefaultTimeout = 10 * time.Second
testDefaultTimeout = 30 * time.Second
)

// API suite container for IAM
type TestSuiteIAM struct {
TestSuiteCommon

ServerTypeDescription string

// Flag to turn on tests for etcd backend IAM
withEtcdBackend bool

endpoint string
adm *madmin.AdminClient
client *minio.Client
}

func newTestSuiteIAM(c TestSuiteCommon) *TestSuiteIAM {
return &TestSuiteIAM{TestSuiteCommon: c}
func newTestSuiteIAM(c TestSuiteCommon, withEtcdBackend bool) *TestSuiteIAM {
etcdStr := ""
if withEtcdBackend {
etcdStr = " (with etcd backend)"
}
return &TestSuiteIAM{
TestSuiteCommon: c,
ServerTypeDescription: fmt.Sprintf("%s%s", c.serverType, etcdStr),
withEtcdBackend: withEtcdBackend,
}
}

func (s *TestSuiteIAM) SetUpSuite(c *check) {
s.TestSuiteCommon.SetUpSuite(c)

func (s *TestSuiteIAM) iamSetup(c *check) {
var err error
// strip url scheme from endpoint
s.endpoint = strings.TrimPrefix(s.endPoint, "http://")
@@ -75,6 +97,62 @@ func (s *TestSuiteIAM) SetUpSuite(c *check) {
}
}

const (
EnvTestEtcdBackend = "ETCD_SERVER"
)

func (s *TestSuiteIAM) setUpEtcd(c *check, etcdServer string) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()

configCmds := []string{
"etcd",
"endpoints=" + etcdServer,
"path_prefix=" + mustGetUUID(),
}
_, err := s.adm.SetConfigKV(ctx, strings.Join(configCmds, " "))
if err != nil {
c.Fatalf("unable to setup Etcd for tests: %v", err)
}

s.RestartIAMSuite(c)
}

func (s *TestSuiteIAM) SetUpSuite(c *check) {
// If etcd backend is specified and etcd server is not present, the test
// is skipped.
etcdServer := os.Getenv(EnvTestEtcdBackend)
if s.withEtcdBackend && etcdServer == "" {
c.Skip("Skipping etcd backend IAM test as no etcd server is configured.")
}

s.TestSuiteCommon.SetUpSuite(c)

s.iamSetup(c)

if s.withEtcdBackend {
s.setUpEtcd(c, etcdServer)
}
}

func (s *TestSuiteIAM) RestartIAMSuite(c *check) {
s.TestSuiteCommon.RestartTestServer(c)

s.iamSetup(c)
}
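
(As the suite setup above shows, the etcd-backed IAM cases are gated entirely by the ETCD_SERVER environment variable: when it is unset those cases self-skip, and when it is set the suite points the server's `etcd` config subsystem at it, with a fresh random `path_prefix` per run so concurrent suites do not share IAM state, then restarts the server to load the new backend.)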

func (s *TestSuiteIAM) getAdminClient(c *check, accessKey, secretKey, sessionToken string) *madmin.AdminClient {
madmClnt, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: credentials.NewStaticV4(accessKey, secretKey, sessionToken),
Secure: s.secure,
})
if err != nil {
c.Fatalf("error creating user admin client: %s", err)
}
madmClnt.SetCustomTransport(s.TestSuiteCommon.client.Transport)
return madmClnt
}

func (s *TestSuiteIAM) getUserClient(c *check, accessKey, secretKey, sessionToken string) *minio.Client {
client, err := minio.New(s.endpoint, &minio.Options{
Creds: credentials.NewStaticV4(accessKey, secretKey, sessionToken),
@@ -87,29 +165,47 @@ func (s *TestSuiteIAM) getUserClient(c *check, accessKey, secretKey, sessionToke
return client
}

func runAllIAMTests(suite *TestSuiteIAM, c *check) {
suite.SetUpSuite(c)
suite.TestUserCreate(c)
suite.TestPolicyCreate(c)
suite.TestGroupAddRemove(c)
suite.TearDownSuite(c)
}

func TestIAMInternalIDPServerSuite(t *testing.T) {
testCases := []*TestSuiteIAM{
baseTestCases := []TestSuiteCommon{
// Init and run test on FS backend with signature v4.
newTestSuiteIAM(TestSuiteCommon{serverType: "FS", signer: signerV4}),
{serverType: "FS", signer: signerV4},
// Init and run test on FS backend, with tls enabled.
newTestSuiteIAM(TestSuiteCommon{serverType: "FS", signer: signerV4, secure: true}),
{serverType: "FS", signer: signerV4, secure: true},
// Init and run test on Erasure backend.
newTestSuiteIAM(TestSuiteCommon{serverType: "Erasure", signer: signerV4}),
{serverType: "Erasure", signer: signerV4},
// Init and run test on ErasureSet backend.
newTestSuiteIAM(TestSuiteCommon{serverType: "ErasureSet", signer: signerV4}),
{serverType: "ErasureSet", signer: signerV4},
}
testCases := []*TestSuiteIAM{}
for _, bt := range baseTestCases {
testCases = append(testCases,
newTestSuiteIAM(bt, false),
newTestSuiteIAM(bt, true),
)
}
for i, testCase := range testCases {
t.Run(fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.serverType), func(t *testing.T) {
runAllIAMTests(testCase, &check{t, testCase.serverType})
})
etcdStr := ""
if testCase.withEtcdBackend {
etcdStr = " (with etcd backend)"
}
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s%s", i+1, testCase.serverType, etcdStr),
func(t *testing.T) {
suite := testCase
c := &check{t, testCase.serverType}

suite.SetUpSuite(c)
suite.TestUserCreate(c)
suite.TestUserPolicyEscalationBug(c)
suite.TestPolicyCreate(c)
suite.TestCannedPolicies(c)
suite.TestGroupAddRemove(c)
suite.TestServiceAccountOpsByAdmin(c)
suite.TestServiceAccountOpsByUser(c)
suite.TestAddServiceAccountPerms(c)
suite.TearDownSuite(c)
},
)
}
}

@@ -146,6 +242,24 @@ func (s *TestSuiteIAM) TestUserCreate(c *check) {
c.Fatalf("user could not create bucket: %v", err)
}

// 3.10. Check that user's password can be updated.
_, newSecretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, newSecretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to update user's secret key: %v", err)
}
// 3.10.1 Check that old password no longer works.
err = client.MakeBucket(ctx, getRandomBucketName(), minio.MakeBucketOptions{})
if err == nil {
c.Fatalf("user was unexpectedly able to create bucket with bad password!")
}
// 3.10.2 Check that new password works.
client = s.getUserClient(c, accessKey, newSecretKey, "")
err = client.MakeBucket(ctx, getRandomBucketName(), minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("user could not create bucket: %v", err)
}

// 4. Check that user can be disabled and verify it.
err = s.adm.SetUserStatus(ctx, accessKey, madmin.AccountDisabled)
if err != nil {
@@ -184,6 +298,220 @@ func (s *TestSuiteIAM) TestUserCreate(c *check) {
}
}

func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()

bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}

// 2. Create a user, associate policy and verify access
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}
// 2.1 check that user does not have any access to the bucket
uClient := s.getUserClient(c, accessKey, secretKey, "")
c.mustNotListObjects(ctx, uClient, bucket)

// 2.2 create and associate policy to user
policy := "mypolicy-test-user-update"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
// 2.3 check user has access to bucket
c.mustListObjects(ctx, uClient, bucket)
// 2.4 check that user cannot delete the bucket
err = uClient.RemoveBucket(ctx, bucket)
if err == nil || err.Error() != "Access Denied." {
c.Fatalf("bucket was deleted unexpectedly or got unexpected err: %v", err)
}

// 3. Craft a request to update the user's permissions
ep := s.adm.GetEndpointURL()
urlValue := url.Values{}
urlValue.Add("accessKey", accessKey)
u, err := url.Parse(fmt.Sprintf("%s://%s/minio/admin/v3/add-user?%s", ep.Scheme, ep.Host, s3utils.QueryEncode(urlValue)))
if err != nil {
c.Fatalf("unexpected url parse err: %v", err)
}
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.String(), nil)
if err != nil {
c.Fatalf("unexpected new request err: %v", err)
}
reqBodyArg := madmin.UserInfo{
SecretKey: secretKey,
PolicyName: "consoleAdmin",
Status: madmin.AccountEnabled,
}
buf, err := json.Marshal(reqBodyArg)
if err != nil {
c.Fatalf("unexpected json encode err: %v", err)
}
buf, err = madmin.EncryptData(secretKey, buf)
if err != nil {
c.Fatalf("unexpected encryption err: %v", err)
}

req.ContentLength = int64(len(buf))
sum := sha256.Sum256(buf)
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum[:]))
req.Body = ioutil.NopCloser(bytes.NewReader(buf))
req = signer.SignV4(*req, accessKey, secretKey, "", "")

// 3.1 Execute the request.
resp, err := s.TestSuiteCommon.client.Do(req)
if err != nil {
c.Fatalf("unexpected request err: %v", err)
}
if resp.StatusCode != 200 {
c.Fatalf("got unexpected response: %#v\n", resp)
}

// 3.2 check that user cannot delete the bucket
err = uClient.RemoveBucket(ctx, bucket)
if err == nil || err.Error() != "Access Denied." {
c.Fatalf("User was able to escalate privileges (Err=%v)!", err)
}
}

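(The escalation attempt above works by having the user call the admin add-user API on their own access key while requesting the consoleAdmin policy; the behavior asserted in 3.2 is that such a self-update may rotate the secret key and status but must not honor the requested policy change.)
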
func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()

// 1. Create two policies
policy1 := "deny-svc"
policy2 := "allow-svc"
policyBytes := []byte(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Deny",
"Action": [
"admin:CreateServiceAccount"
]
}
]
}`)

newPolicyBytes := []byte(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::testbucket/*"
]
}
]
}`)

err := s.adm.AddCannedPolicy(ctx, policy1, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}

err = s.adm.AddCannedPolicy(ctx, policy2, newPolicyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}

// 2. Verify that policy json is validated by server
invalidPolicyBytes := policyBytes[:len(policyBytes)-1]
err = s.adm.AddCannedPolicy(ctx, policy1+"invalid", invalidPolicyBytes)
if err == nil {
c.Fatalf("invalid policy creation success")
}

// 3. Create a user, associate policy and verify access
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}
// 3.1 check that user does not have any access to the bucket
uClient := s.getUserClient(c, accessKey, secretKey, "")
c.mustNotListObjects(ctx, uClient, "testbucket")

// 3.2 associate the deny policy to user
err = s.adm.SetPolicy(ctx, policy1, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}

admClnt := s.getAdminClient(c, accessKey, secretKey, "")

// 3.3 check user does not have explicit permissions to create service account.
c.mustNotCreateSvcAccount(ctx, accessKey, admClnt)

// 4. Verify the policy appears in listing
ps, err := s.adm.ListCannedPolicies(ctx)
if err != nil {
c.Fatalf("policy list err: %v", err)
}
_, ok := ps[policy1]
if !ok {
c.Fatalf("policy was missing!")
}

// 5. associate the allow policy to user
err = s.adm.SetPolicy(ctx, policy2, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}

// 5.1 check user can create service account implicitly.
c.mustCreateSvcAccount(ctx, accessKey, admClnt)

_, ok = ps[policy2]
if !ok {
c.Fatalf("policy was missing!")
}

err = s.adm.RemoveUser(ctx, accessKey)
if err != nil {
c.Fatalf("user could not be deleted: %v", err)
}

err = s.adm.RemoveCannedPolicy(ctx, policy1)
if err != nil {
c.Fatalf("policy del err: %v", err)
}

err = s.adm.RemoveCannedPolicy(ctx, policy2)
if err != nil {
c.Fatalf("policy del err: %v", err)
}
}

func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
@@ -232,25 +560,15 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
}
// 3.1 check that user does not have any access to the bucket
uClient := s.getUserClient(c, accessKey, secretKey, "")
res := uClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
v, ok := <-res
if !ok {
c.Fatalf("list channel was closed!")
}
if v.Err == nil {
c.Fatalf("User appears to be able to list!")
}
c.mustNotListObjects(ctx, uClient, bucket)

// 3.2 associate policy to user
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
// 3.3 check user has access to bucket
res = uClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
v, ok = <-res
if ok {
c.Fatalf("list channel was not closed - unexpected error or objects in listing: %v", v)
}
c.mustListObjects(ctx, uClient, bucket)
// 3.4 Check that user cannot exceed their permissions
err = uClient.RemoveBucket(ctx, bucket)
if err == nil {
@@ -262,15 +580,88 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
if err != nil {
c.Fatalf("policy list err: %v", err)
}
_, ok = ps[policy]
_, ok := ps[policy]
if !ok {
c.Fatalf("policy was missing!")
}

// 5. Check that policy can be deleted.
// 5. Check that policy cannot be deleted when attached to a user.
err = s.adm.RemoveCannedPolicy(ctx, policy)
if err == nil {
c.Fatalf("policy could be unexpectedly deleted!")
}

// 6. Delete the user and then delete the policy.
err = s.adm.RemoveUser(ctx, accessKey)
if err != nil {
c.Fatalf("user could not be deleted: %v", err)
}
err = s.adm.RemoveCannedPolicy(ctx, policy)
if err != nil {
c.Fatalf("policy delete err: %v", err)
c.Fatalf("policy del err: %v", err)
}
}

func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()

policies, err := s.adm.ListCannedPolicies(ctx)
if err != nil {
c.Fatalf("unable to list policies: %v", err)
}

defaultPolicies := []string{
"readwrite",
"readonly",
"writeonly",
"diagnostics",
"consoleAdmin",
}

for _, v := range defaultPolicies {
if _, ok := policies[v]; !ok {
c.Fatalf("Failed to find %s in policies list", v)
}
}

bucket := getRandomBucketName()
err = s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}

policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))

// Check that default policies can be overwritten.
err = s.adm.AddCannedPolicy(ctx, "readwrite", policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}

info, err := s.adm.InfoCannedPolicy(ctx, "readwrite")
if err != nil {
c.Fatalf("policy info err: %v", err)
}

infoStr := string(info)
if !strings.Contains(infoStr, `"s3:PutObject"`) || !strings.Contains(infoStr, ":"+bucket+"/") {
c.Fatalf("policy contains unexpected content!")
}
}

@@ -324,14 +715,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {

// 2. Check that user has no access
uClient := s.getUserClient(c, accessKey, secretKey, "")
res := uClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
v, ok := <-res
if !ok {
c.Fatalf("list channel was closed!")
}
if v.Err == nil {
c.Fatalf("User appears to be able to list!")
}
c.mustNotListObjects(ctx, uClient, bucket)

// 3. Associate policy to group and check user got access.
err = s.adm.SetPolicy(ctx, policy, group, true)
@@ -339,11 +723,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
c.Fatalf("Unable to set policy: %v", err)
}
// 3.1 check user has access to bucket
res = uClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
v, ok = <-res
if ok {
c.Fatalf("list channel was not closed - unexpected error or objects in listing: %v", v)
}
c.mustListObjects(ctx, uClient, bucket)
// 3.2 Check that user cannot exceed their permissions
err = uClient.RemoveBucket(ctx, bucket)
if err == nil {
@@ -377,14 +757,8 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
c.Fatalf("group desc err: %v", err)
}
c.Assert(groupInfo.Status, string(madmin.GroupDisabled))
res = uClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
v, ok = <-res
if !ok {
c.Fatalf("list channel was closed!")
}
if v.Err == nil {
c.Fatalf("User appears to be able to list!")
}
c.mustNotListObjects(ctx, uClient, bucket)

err = s.adm.SetGroupStatus(ctx, group, madmin.GroupEnabled)
if err != nil {
c.Fatalf("group set status err: %v", err)
@@ -394,11 +768,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
c.Fatalf("group desc err: %v", err)
}
c.Assert(groupInfo.Status, string(madmin.GroupEnabled))
res = uClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
v, ok = <-res
if ok {
c.Fatalf("list channel was not closed - unexpected error or objects in listing: %v", v)
}
c.mustListObjects(ctx, uClient, bucket)

// 6. Verify that group cannot be deleted with users.
err = s.adm.UpdateGroupMembers(ctx, madmin.GroupAddRemove{
@@ -423,14 +793,8 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
if err != nil {
c.Fatalf("group update err: %v", err)
}
res = uClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
v, ok = <-res
if !ok {
c.Fatalf("list channel was closed!")
}
if v.Err == nil {
c.Fatalf("User appears to be able to list!")
}
c.mustNotListObjects(ctx, uClient, bucket)

// 7.1 verify group still exists
groupInfo, err = s.adm.GetGroupDescription(ctx, group)
if err != nil {
@@ -460,6 +824,362 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
}
}

func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()

bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}

// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}

accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}

err = s.adm.SetPolicy(ctx, policy, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}

// Create a madmin client with user creds
userAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: cr.NewStaticV4(accessKey, secretKey, ""),
Secure: s.secure,
})
if err != nil {
c.Fatalf("Err creating user admin client: %v", err)
}
userAdmClient.SetCustomTransport(s.TestSuiteCommon.client.Transport)

// Create svc acc
cr := c.mustCreateSvcAccount(ctx, accessKey, userAdmClient)

// 1. Check that svc account appears in listing
c.assertSvcAccAppearsInListing(ctx, userAdmClient, accessKey, cr.AccessKey)

// 2. Check that svc account info can be queried
c.assertSvcAccInfoQueryable(ctx, userAdmClient, accessKey, cr.AccessKey, false)

// 3. Check S3 access
c.assertSvcAccS3Access(ctx, s, cr, bucket)

// 4. Check that svc account can restrict the policy, and that the
// session policy can be updated.
c.assertSvcAccSessionPolicyUpdate(ctx, s, userAdmClient, accessKey, bucket)

// 5. Check that service account's secret key and account status can be
// updated.
c.assertSvcAccSecretKeyAndStatusUpdate(ctx, s, userAdmClient, accessKey, bucket)

// 6. Check that service account can be deleted.
c.assertSvcAccDeletion(ctx, s, userAdmClient, accessKey, bucket)

// 7. Check that service account cannot be created for some other user.
c.mustNotCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient)
}

func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()

bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}

// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}

accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}

err = s.adm.SetPolicy(ctx, policy, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}

// 1. Create a service account for the user
cr := c.mustCreateSvcAccount(ctx, accessKey, s.adm)

// 1.2 Check that svc account appears in listing
c.assertSvcAccAppearsInListing(ctx, s.adm, accessKey, cr.AccessKey)

// 1.3 Check that svc account info can be queried
c.assertSvcAccInfoQueryable(ctx, s.adm, accessKey, cr.AccessKey, false)

// 2. Check that svc account can access the bucket
c.assertSvcAccS3Access(ctx, s, cr, bucket)

// 3. Check that svc account can restrict the policy, and that the
// session policy can be updated.
c.assertSvcAccSessionPolicyUpdate(ctx, s, s.adm, accessKey, bucket)

// 4. Check that service account's secret key and account status can be
// updated.
c.assertSvcAccSecretKeyAndStatusUpdate(ctx, s, s.adm, accessKey, bucket)

// 5. Check that service account can be deleted.
c.assertSvcAccDeletion(ctx, s, s.adm, accessKey, bucket)
}

func (c *check) mustCreateIAMUser(ctx context.Context, admClnt *madmin.AdminClient) madmin.Credentials {
randUser := mustGetUUID()
randPass := mustGetUUID()
err := admClnt.AddUser(ctx, randUser, randPass)
if err != nil {
c.Fatalf("should be able to create a user: %v", err)
}
return madmin.Credentials{
AccessKey: randUser,
SecretKey: randPass,
}
}

func (c *check) mustGetIAMUserInfo(ctx context.Context, admClnt *madmin.AdminClient, accessKey string) madmin.UserInfo {
ui, err := admClnt.GetUserInfo(ctx, accessKey)
if err != nil {
c.Fatalf("should be able to get user info: %v", err)
}
return ui
}

func (c *check) mustNotCreateIAMUser(ctx context.Context, admClnt *madmin.AdminClient) {
randUser := mustGetUUID()
randPass := mustGetUUID()
err := admClnt.AddUser(ctx, randUser, randPass)
if err == nil {
c.Fatalf("should not be able to create a user")
}
}

func (c *check) mustCreateSvcAccount(ctx context.Context, tgtUser string, admClnt *madmin.AdminClient) madmin.Credentials {
cr, err := admClnt.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
TargetUser: tgtUser,
})
if err != nil {
c.Fatalf("user should be able to create service accounts %s", err)
}
return cr
}

func (c *check) mustNotCreateSvcAccount(ctx context.Context, tgtUser string, admClnt *madmin.AdminClient) {
_, err := admClnt.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
TargetUser: tgtUser,
})
if err == nil {
c.Fatalf("user was able to add service accounts unexpectedly!")
}
}

// mustNotListObjects asserts that listing fails: the first value received on
// the listing channel must carry a non-nil error.
func (c *check) mustNotListObjects(ctx context.Context, client *minio.Client, bucket string) {
res := client.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
v, ok := <-res
if !ok || v.Err == nil {
c.Fatalf("user was able to list unexpectedly!")
}
}

// mustListObjects asserts that listing succeeds: either the channel closes
// immediately (empty bucket) or the first value carries no error.
func (c *check) mustListObjects(ctx context.Context, client *minio.Client, bucket string) {
res := client.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
v, ok := <-res
if ok && v.Err != nil {
msg := fmt.Sprintf("user was unable to list: %v", v.Err)
c.Fatalf(msg)
}
}

func (c *check) assertSvcAccS3Access(ctx context.Context, s *TestSuiteIAM, cr madmin.Credentials, bucket string) {
svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
c.mustListObjects(ctx, svcClient, bucket)
}

func (c *check) assertSvcAccAppearsInListing(ctx context.Context, madmClient *madmin.AdminClient, parentAK, svcAK string) {
listResp, err := madmClient.ListServiceAccounts(ctx, parentAK)
if err != nil {
c.Fatalf("unable to list svc accounts: %v", err)
}
if !set.CreateStringSet(listResp.Accounts...).Contains(svcAK) {
c.Fatalf("service account did not appear in listing!")
}
}

func (c *check) assertSvcAccInfoQueryable(ctx context.Context, madmClient *madmin.AdminClient, parentAK, svcAK string, skipParentUserCheck bool) {
infoResp, err := madmClient.InfoServiceAccount(ctx, svcAK)
if err != nil {
c.Fatalf("unable to get svc acc info: %v", err)
}
if !skipParentUserCheck {
c.Assert(infoResp.ParentUser, parentAK)
}
c.Assert(infoResp.AccountStatus, "on")
c.Assert(infoResp.ImpliedPolicy, true)
}

// This test assumes that the policy for `accessKey` allows listing on the given
// bucket. It creates a session policy that restricts listing on the bucket and
// then enables it again in a session policy update call.
func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuiteIAM, madmClient *madmin.AdminClient, accessKey, bucket string) {
svcAK, svcSK := mustGenerateCredentials(c)

// This policy does not allow listing objects.
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
cr, err := madmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
Policy: policyBytes,
TargetUser: accessKey,
AccessKey: svcAK,
SecretKey: svcSK,
})
if err != nil {
c.Fatalf("Unable to create svc acc: %v", err)
}
svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
c.mustNotListObjects(ctx, svcClient, bucket)

// This policy allows listing objects.
newPolicyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = madmClient.UpdateServiceAccount(ctx, svcAK, madmin.UpdateServiceAccountReq{
NewPolicy: newPolicyBytes,
})
if err != nil {
c.Fatalf("unable to update session policy for svc acc: %v", err)
}
c.mustListObjects(ctx, svcClient, bucket)
}

func (c *check) assertSvcAccSecretKeyAndStatusUpdate(ctx context.Context, s *TestSuiteIAM, madmClient *madmin.AdminClient, accessKey, bucket string) {
svcAK, svcSK := mustGenerateCredentials(c)
cr, err := madmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
TargetUser: accessKey,
AccessKey: svcAK,
SecretKey: svcSK,
})
if err != nil {
c.Fatalf("Unable to create svc acc: %v", err)
}
svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
c.mustListObjects(ctx, svcClient, bucket)

_, svcSK2 := mustGenerateCredentials(c)
err = madmClient.UpdateServiceAccount(ctx, svcAK, madmin.UpdateServiceAccountReq{
NewSecretKey: svcSK2,
})
if err != nil {
c.Fatalf("unable to update secret key for svc acc: %v", err)
}
// old creds should not work:
c.mustNotListObjects(ctx, svcClient, bucket)
// new creds work:
svcClient2 := s.getUserClient(c, cr.AccessKey, svcSK2, "")
c.mustListObjects(ctx, svcClient2, bucket)

// update status to disabled
err = madmClient.UpdateServiceAccount(ctx, svcAK, madmin.UpdateServiceAccountReq{
NewStatus: "off",
})
if err != nil {
c.Fatalf("unable to update status for svc acc: %v", err)
}
c.mustNotListObjects(ctx, svcClient2, bucket)
}

func (c *check) assertSvcAccDeletion(ctx context.Context, s *TestSuiteIAM, madmClient *madmin.AdminClient, accessKey, bucket string) {
svcAK, svcSK := mustGenerateCredentials(c)
cr, err := madmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
TargetUser: accessKey,
AccessKey: svcAK,
SecretKey: svcSK,
})
if err != nil {
c.Fatalf("Unable to create svc acc: %v", err)
}
svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
c.mustListObjects(ctx, svcClient, bucket)

err = madmClient.DeleteServiceAccount(ctx, svcAK)
if err != nil {
c.Fatalf("unable to delete svc acc: %v", err)
}
c.mustNotListObjects(ctx, svcClient, bucket)
}

func mustGenerateCredentials(c *check) (string, string) {
ak, sk, err := auth.GenerateCredentials()
if err != nil {

@@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@@ -18,14 +18,16 @@
package cmd

import (
"bytes"
"context"
crand "crypto/rand"
"crypto/subtle"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
@@ -36,6 +38,7 @@ import (
"sort"
"strconv"
"strings"
"sync"
"time"

"github.com/dustin/go-humanize"
@@ -190,7 +193,11 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req

// ServiceHandler - POST /minio/admin/v3/service?action={action}
// ----------
// restarts/stops minio server gracefully. In a distributed setup,
// Supports following actions:
// - restart (restarts all the MinIO instances in a setup)
// - stop (stops all the MinIO instances in a setup)
// - freeze (freezes all incoming S3 API calls)
// - unfreeze (unfreezes previously frozen S3 API calls)
func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "Service")

@@ -205,6 +212,10 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request)
serviceSig = serviceRestart
case madmin.ServiceActionStop:
serviceSig = serviceStop
case madmin.ServiceActionFreeze:
serviceSig = serviceFreeze
case madmin.ServiceActionUnfreeze:
serviceSig = serviceUnFreeze
default:
logger.LogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.Application)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
@@ -217,6 +228,8 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request)
objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceRestartAdminAction)
case serviceStop:
objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceStopAdminAction)
case serviceFreeze, serviceUnFreeze:
objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceFreezeAdminAction)
}
if objectAPI == nil {
return
@@ -233,7 +246,14 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request)
// Reply to the client before restarting, stopping MinIO server.
writeSuccessResponseHeadersOnly(w)

globalServiceSignalCh <- serviceSig
switch serviceSig {
case serviceFreeze:
freezeServices()
case serviceUnFreeze:
unfreezeServices()
case serviceRestart, serviceStop:
globalServiceSignalCh <- serviceSig
}
}
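
(A client-side sketch of the new freeze/unfreeze actions; it assumes madmin-go ServiceFreeze/ServiceUnfreeze helpers that POST the action values handled above, which are not shown in this diff.)

package main

import (
    "context"
    "log"

    "github.com/minio/madmin-go"
)

func main() {
    adm, err := madmin.New("localhost:9000", "minioadmin", "minioadmin", false)
    if err != nil {
        log.Fatal(err)
    }
    ctx := context.Background()

    // Freeze all incoming S3 API calls cluster-wide...
    if err := adm.ServiceFreeze(ctx); err != nil {
        log.Fatal(err)
    }
    // ...perform maintenance here...

    // ...then let S3 traffic flow again.
    if err := adm.ServiceUnfreeze(ctx); err != nil {
        log.Fatal(err)
    }
}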

// ServerProperties holds some server information such as, version, region
@@ -266,6 +286,7 @@ type ServerHTTPAPIStats struct {
// including their average execution time.
type ServerHTTPStats struct {
S3RequestsInQueue int32 `json:"s3RequestsInQueue"`
S3RequestsIncoming uint64 `json:"s3RequestsIncoming"`
CurrentS3Requests ServerHTTPAPIStats `json:"currentS3Requests"`
TotalS3Requests ServerHTTPAPIStats `json:"totalS3Requests"`
TotalS3Errors ServerHTTPAPIStats `json:"totalS3Errors"`
@@ -316,7 +337,6 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
// Reply with storage information (across nodes in a
// distributed setup) as json.
writeSuccessResponseJSON(w, jsonBytes)

}

// DataUsageInfoHandler - GET /minio/admin/v3/datausage
@@ -481,7 +501,7 @@ func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request
}

// StartProfilingResult contains the status of the starting
// profiling action in a given server
// profiling action in a given server - deprecated API
type StartProfilingResult struct {
NodeName string `json:"nodeName"`
Success bool `json:"success"`
@@ -575,6 +595,85 @@ func (a adminAPIHandlers) StartProfilingHandler(w http.ResponseWriter, r *http.R
writeSuccessResponseJSON(w, startProfilingResultInBytes)
}

// ProfileHandler - POST /minio/admin/v3/profile/?profilerType={profilerType}
// ----------
// Enable server profiling
func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "Profile")

defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

// Validate request signature.
_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ProfilingAdminAction, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return
}

if globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
profileStr := r.Form.Get("profilerType")
profiles := strings.Split(profileStr, ",")
duration := time.Minute
if dstr := r.Form.Get("duration"); dstr != "" {
var err error
duration, err = time.ParseDuration(dstr)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
}
// read request body
io.CopyN(ioutil.Discard, r.Body, 1)

globalProfilerMu.Lock()

if globalProfiler == nil {
globalProfiler = make(map[string]minioProfiler, 10)
}

// Stop profiler of all types if already running
for k, v := range globalProfiler {
v.Stop()
delete(globalProfiler, k)
}

// Start profiling on remote servers.
for _, profiler := range profiles {
globalNotificationSys.StartProfiling(profiler)

// Start profiling locally as well.
prof, err := startProfiler(profiler)
if err == nil {
globalProfiler[profiler] = prof
}
}
globalProfilerMu.Unlock()

timer := time.NewTimer(duration)
for {
select {
case <-ctx.Done():
if !timer.Stop() {
<-timer.C
}
for k, v := range globalProfiler {
v.Stop()
delete(globalProfiler, k)
}
return
case <-timer.C:
if !globalNotificationSys.DownloadProfilingData(ctx, w) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminProfilerNotEnabled), r.URL)
return
}
return
}
}
}
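
(The single-shot profile endpoint above supersedes the deprecated start/download pair. A client-side sketch, assuming a corresponding madmin-go Profile helper that streams back the zipped profile data once the duration elapses.)

package main

import (
    "context"
    "io"
    "log"
    "os"
    "time"

    "github.com/minio/madmin-go"
)

func main() {
    adm, err := madmin.New("localhost:9000", "minioadmin", "minioadmin", false)
    if err != nil {
        log.Fatal(err)
    }
    // Run a 30s CPU profile across the cluster and save the returned zip.
    rd, err := adm.Profile(context.Background(), madmin.ProfilerCPU, 30*time.Second)
    if err != nil {
        log.Fatal(err)
    }
    defer rd.Close()

    f, err := os.Create("profile.zip")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()
    if _, err := io.Copy(f, rd); err != nil {
        log.Fatal(err)
    }
}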

// dummyFileInfo represents a dummy representation of a profile data file
// present only in memory, it helps to generate the zip stream.
type dummyFileInfo struct {
@@ -595,7 +694,7 @@ func (f dummyFileInfo) Sys() interface{} { return f.sys }

// DownloadProfilingHandler - POST /minio/admin/v3/profiling/download
// ----------
// Download profiling information of all nodes in a zip format
// Download profiling information of all nodes in a zip format - deprecated API
func (a adminAPIHandlers) DownloadProfilingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DownloadProfiling")

@@ -756,13 +855,17 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
}
// Send whitespace and keep connection open
w.Write([]byte(" "))
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
case hr := <-respCh:
switch hr.apiErr {
case noError:
if started {
w.Write(hr.respBytes)
if _, err := w.Write(hr.respBytes); err != nil {
return
}
w.(http.Flusher).Flush()
} else {
writeSuccessResponseJSON(w, hr.respBytes)
@@ -787,7 +890,9 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set(xhttp.ContentType, string(mimeJSON))
w.WriteHeader(hr.apiErr.HTTPStatusCode)
}
w.Write(errorRespJSON)
if _, err := w.Write(errorRespJSON); err != nil {
return
}
w.(http.Flusher).Flush()
}
break forLoop
@@ -911,12 +1016,65 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
}
}

func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SpeedtestHandler")
// NetperfHandler - perform mesh style network throughput test
func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "NetperfHandler")

defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
if objectAPI == nil {
return
}

if !globalIsDistErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

nsLock := objectAPI.NewNSLock(minioMetaBucket, "netperf")
lkctx, err := nsLock.GetLock(ctx, globalOperationTimeout)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(toAPIErrorCode(ctx, err)), r.URL)
return
}
defer nsLock.Unlock(lkctx.Cancel)

durationStr := r.Form.Get(peerRESTDuration)
duration, err := time.ParseDuration(durationStr)
if err != nil {
duration = globalNetPerfMinDuration
}

if duration < globalNetPerfMinDuration {
// We need sample size of minimum 10 secs.
duration = globalNetPerfMinDuration
}

duration = duration.Round(time.Second)

results := globalNotificationSys.Netperf(ctx, duration)
enc := json.NewEncoder(w)
if err := enc.Encode(madmin.NetperfResult{NodeResults: results}); err != nil {
return
}
}
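
(Driving the mesh network test from a client, as a sketch; it assumes a matching madmin-go Netperf helper returning the NetperfResult encoded above, with per-node TX/RX throughput fields.)

package main

import (
    "context"
    "fmt"
    "log"
    "time"

    "github.com/minio/madmin-go"
)

func main() {
    adm, err := madmin.New("localhost:9000", "minioadmin", "minioadmin", false)
    if err != nil {
        log.Fatal(err)
    }
    // Durations below the server-side minimum are rounded up by the handler.
    res, err := adm.Netperf(context.Background(), 10*time.Second)
    if err != nil {
        log.Fatal(err)
    }
    for _, nr := range res.NodeResults {
        fmt.Println(nr.Endpoint, nr.TX, nr.RX)
    }
}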

// SpeedtestHandler - Deprecated. See ObjectSpeedtestHandler
func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Request) {
a.ObjectSpeedtestHandler(w, r)
}

// ObjectSpeedtestHandler - reports maximum speed of a cluster by performing PUT and
// GET operations on the server, supports auto tuning by default by automatically
// increasing concurrency and stopping when we have reached the limits on the
// system.
func (a adminAPIHandlers) ObjectSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ObjectSpeedtestHandler")

defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
if objectAPI == nil {
return
}
@@ -929,12 +1087,8 @@ func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Reques
sizeStr := r.Form.Get(peerRESTSize)
durationStr := r.Form.Get(peerRESTDuration)
concurrentStr := r.Form.Get(peerRESTConcurrent)
autotuneStr := r.Form.Get("autotune")

var autotune bool
if autotuneStr != "" {
autotune = true
}
autotune := r.Form.Get("autotune") == "true"
storageClass := r.Form.Get("storage-class")

size, err := strconv.Atoi(sizeStr)
if err != nil {
@@ -946,59 +1100,206 @@ func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Reques
concurrent = 32
}

if runtime.GOMAXPROCS(0) < concurrent {
concurrent = runtime.GOMAXPROCS(0)
}

duration, err := time.ParseDuration(durationStr)
if err != nil {
duration = time.Second * 10
}

throughputSize := size
iopsSize := size
sufficientCapacity, canAutotune, capacityErrMsg := validateObjPerfOptions(ctx, objectAPI, concurrent, size, autotune)

if !sufficientCapacity {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, AdminError{
Code: "XMinioSpeedtestInsufficientCapacity",
Message: capacityErrMsg,
StatusCode: http.StatusInsufficientStorage,
}), r.URL)
return
}

if autotune && !canAutotune {
autotune = false
}

bucketExists, err := makeObjectPerfBucket(ctx, objectAPI)
if err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if !bucketExists {
defer deleteObjectPerfBucket(objectAPI)
}

// Freeze all incoming S3 API calls before running speedtest.
globalNotificationSys.ServiceFreeze(ctx, true)

// unfreeze all incoming S3 API calls after speedtest.
defer globalNotificationSys.ServiceFreeze(ctx, false)

keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()

enc := json.NewEncoder(w)
ch := objectSpeedTest(ctx, speedTestOpts{size, concurrent, duration, autotune, storageClass})
for {
select {
case <-ctx.Done():
return
case <-keepAliveTicker.C:
// Write a blank entry to prevent client from disconnecting
if err := enc.Encode(madmin.SpeedTestResult{}); err != nil {
return
}
w.(http.Flusher).Flush()
case result, ok := <-ch:
if !ok {
return
}
if err := enc.Encode(result); err != nil {
return
}
w.(http.Flusher).Flush()
}
}
}
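
(Reading the streamed results client-side, as a sketch; it assumes madmin-go's Speedtest helper, which decodes the JSON stream above, including the blank keep-alive entries, into a channel of SpeedTestResult values.)

package main

import (
    "context"
    "fmt"
    "log"
    "time"

    "github.com/dustin/go-humanize"
    "github.com/minio/madmin-go"
)

func main() {
    adm, err := madmin.New("localhost:9000", "minioadmin", "minioadmin", false)
    if err != nil {
        log.Fatal(err)
    }
    ch, err := adm.Speedtest(context.Background(), madmin.SpeedtestOpts{
        Size:        64 * humanize.MiByte,
        Concurrency: 32,
        Duration:    10 * time.Second,
        Autotune:    true,
    })
    if err != nil {
        log.Fatal(err)
    }
    // With autotune, the last value received carries the best measurement.
    var final madmin.SpeedTestResult
    for r := range ch {
        final = r
    }
    fmt.Printf("PUT %s/s, GET %s/s at concurrency %d\n",
        humanize.IBytes(final.PUTStats.ThroughputPerSec),
        humanize.IBytes(final.GETStats.ThroughputPerSec),
        final.Concurrent)
}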

func makeObjectPerfBucket(ctx context.Context, objectAPI ObjectLayer) (bucketExists bool, err error) {
	err = objectAPI.MakeBucketWithLocation(ctx, globalObjectPerfBucket, BucketOptions{})
	if err != nil {
		if _, ok := err.(BucketExists); !ok {
			// Only BucketExists error can be ignored.
			return false, err
		}
		bucketExists = true
	}
	return bucketExists, nil
}

func deleteObjectPerfBucket(objectAPI ObjectLayer) {
	objectAPI.DeleteBucket(context.Background(), globalObjectPerfBucket, DeleteBucketOptions{
		Force:      true,
		NoRecreate: true,
	})
}

func validateObjPerfOptions(ctx context.Context, objectAPI ObjectLayer, concurrent int, size int, autotune bool) (sufficientCapacity bool, canAutotune bool, capacityErrMsg string) {
	storageInfo, _ := objectAPI.StorageInfo(ctx)
	capacityNeeded := uint64(concurrent * size)
	capacity := uint64(GetTotalUsableCapacityFree(storageInfo.Disks, storageInfo))

	if capacity < capacityNeeded {
		return false, false, fmt.Sprintf("not enough usable space available to perform speedtest - expected %s, got %s",
			humanize.IBytes(capacityNeeded), humanize.IBytes(capacity))
	}

	// Verify if we can employ autotune without running out of capacity,
	// if we do run out of capacity, make sure to turn-off autotuning
	// in such situations.
	if autotune {
		iopsSize = 4 * humanize.KiByte
		newConcurrent := concurrent + (concurrent+1)/2
		autoTunedCapacityNeeded := uint64(newConcurrent * size)
		if capacity < autoTunedCapacityNeeded {
			// Turn-off auto-tuning if next possible concurrency would reach beyond disk capacity.
			return true, false, ""
		}
	}

	return true, autotune, ""
}
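The capacity checks above are plain arithmetic; a worked example with the defaults used by the health handler later in this patch (concurrent = 32, size = 64 MiB):

package main

import "fmt"

func main() {
	concurrent := 32
	size := 64 << 20 // 64 MiB

	capacityNeeded := uint64(concurrent * size) // 2 GiB must be free before the first round
	// Each autotune step raises concurrency by half again (rounded up), i.e. 32 -> 48.
	newConcurrent := concurrent + (concurrent+1)/2
	autoTunedNeeded := uint64(newConcurrent * size) // 3 GiB of head-room required to allow autotune

	fmt.Println(capacityNeeded, newConcurrent, autoTunedNeeded) // 2147483648 48 3221225472
}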

// NetSpeedtestHandler - reports maximum network throughput
func (a adminAPIHandlers) NetSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "NetSpeedtestHandler")

	writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
}

// DriveSpeedtestHandler - reports throughput of drives available in the cluster
func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DriveSpeedtestHandler")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
	if objectAPI == nil {
		return
	}

	if !globalIsErasure {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	// Freeze all incoming S3 API calls before running speedtest.
	globalNotificationSys.ServiceFreeze(ctx, true)

	// unfreeze all incoming S3 API calls after speedtest.
	defer globalNotificationSys.ServiceFreeze(ctx, false)

	serial := r.Form.Get("serial") == "true"
	blockSizeStr := r.Form.Get("blocksize")
	fileSizeStr := r.Form.Get("filesize")

	blockSize, err := strconv.ParseUint(blockSizeStr, 10, 64)
	if err != nil {
		blockSize = 4 * humanize.MiByte // default value
	}

	fileSize, err := strconv.ParseUint(fileSizeStr, 10, 64)
	if err != nil {
		fileSize = 1 * humanize.GiByte // default value
	}

	opts := madmin.DriveSpeedTestOpts{
		Serial:    serial,
		BlockSize: blockSize,
		FileSize:  fileSize,
	}

	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
	defer keepAliveTicker.Stop()

	endBlankRepliesCh := make(chan error)
	enc := json.NewEncoder(w)
	ch := globalNotificationSys.DriveSpeedTest(ctx, opts)

	var wg sync.WaitGroup
	wg.Add(1)

	// local driveSpeedTest
	go func() {
		for {
			select {
			case <-ctx.Done():
				endBlankRepliesCh <- nil
				return
			case <-keepAliveTicker.C:
				// Write a blank entry to prevent client from disconnecting
				if err := json.NewEncoder(w).Encode(madmin.SpeedTestResult{}); err != nil {
					endBlankRepliesCh <- err
					return
				}
				w.(http.Flusher).Flush()
			case endBlankRepliesCh <- nil:
				return
			}
		defer wg.Done()
		enc.Encode(driveSpeedTest(ctx, opts))
		if wf, ok := w.(http.Flusher); ok {
			wf.Flush()
		}
	}()

	result, err := speedTest(ctx, throughputSize, iopsSize, concurrent, duration, autotune)
	if <-endBlankRepliesCh != nil {
		return
	for {
		select {
		case <-ctx.Done():
			goto endloop
		case <-keepAliveTicker.C:
			// Write a blank entry to prevent client from disconnecting
			if err := enc.Encode(madmin.DriveSpeedTestResult{}); err != nil {
				goto endloop
			}
			w.(http.Flusher).Flush()
		case result, ok := <-ch:
			if !ok {
				goto endloop
			}
			if err := enc.Encode(result); err != nil {
				goto endloop
			}
			w.(http.Flusher).Flush()
		}
	}
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err := json.NewEncoder(w).Encode(result); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	objectAPI.DeleteBucket(ctx, pathJoin(minioMetaSpeedTestBucket, minioMetaSpeedTestBucketPrefix), DeleteBucketOptions{Force: true, NoRecreate: true})

	w.(http.Flusher).Flush()
endloop:
	wg.Wait()
}
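Both speedtest handlers end in the same shape of loop: drain a result channel to the response while a 500ms ticker emits blank entries so idle proxies do not drop the connection. A distilled sketch of that pattern (written with Go 1.18 generics for brevity and assuming the obvious context/json/http/time imports; the handlers themselves inline the loop):

// streamJSON drains ch to w as a JSON stream, interleaving zero-value
// keep-alive entries every 500ms until ctx is done or ch closes.
func streamJSON[T any](ctx context.Context, w http.ResponseWriter, ch <-chan T) {
	tick := time.NewTicker(500 * time.Millisecond)
	defer tick.Stop()
	enc := json.NewEncoder(w)
	fl, canFlush := w.(http.Flusher)
	for {
		select {
		case <-ctx.Done():
			return
		case <-tick.C:
			var blank T // the zero value doubles as the keep-alive message
			if enc.Encode(blank) != nil {
				return
			}
		case v, ok := <-ch:
			if !ok {
				return
			}
			if enc.Encode(v) != nil {
				return
			}
		}
		if canFlush {
			fl.Flush()
		}
	}
}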

// Admin API errors

@@ -1208,10 +1509,10 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
			if err := enc.Encode(log); err != nil {
				return
			}
		}
		if len(logCh) == 0 {
			// Flush if nothing is queued
			w.(http.Flusher).Flush()
		if len(logCh) == 0 {
			// Flush if nothing is queued
			w.(http.Flusher).Flush()
		}
		}
	case <-keepAliveTicker.C:
		if len(logCh) > 0 {
@@ -1313,7 +1614,7 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
	if keyID == "" {
		keyID = stat.DefaultKey
	}
	var response = madmin.KMSKeyStatus{
	response := madmin.KMSKeyStatus{
		KeyID: keyID,
	}

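Many of the smaller hunks from here on are mechanical gofumpt-style rewrites rather than behavior changes: `var x = expr` becomes `x := expr` inside functions, `strings.Replace(s, a, b, -1)` becomes `strings.ReplaceAll(s, a, b)`, and `///` comment banners become `//`. The two declarations below are equivalent:

	var response = madmin.KMSKeyStatus{KeyID: keyID} // old spelling
	response := madmin.KMSKeyStatus{KeyID: keyID}    // gofumpt-preferred short form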
@@ -1370,6 +1671,7 @@ func getServerInfo(ctx context.Context, r *http.Request) madmin.InfoMessage {
	ldap := madmin.LDAP{}
	if globalLDAPConfig.Enabled {
		ldapConn, err := globalLDAPConfig.Connect()
		//nolint:gocritic
		if err != nil {
			ldap.Status = string(madmin.ItemOffline)
		} else if ldapConn == nil {
@@ -1397,6 +1699,7 @@ func getServerInfo(ctx context.Context, r *http.Request) madmin.InfoMessage {

	buckets := madmin.Buckets{}
	objects := madmin.Objects{}
	versions := madmin.Versions{}
	usage := madmin.Usage{}

	objectAPI := newObjectLayerFn()
@@ -1408,6 +1711,7 @@ func getServerInfo(ctx context.Context, r *http.Request) madmin.InfoMessage {
	if err == nil {
		buckets = madmin.Buckets{Count: dataUsageInfo.BucketsCount}
		objects = madmin.Objects{Count: dataUsageInfo.ObjectsTotalCount}
		versions = madmin.Versions{Count: dataUsageInfo.VersionsTotalCount}
		usage = madmin.Usage{Size: dataUsageInfo.ObjectsTotalSize}
	} else {
		buckets = madmin.Buckets{Error: err.Error()}
@@ -1451,11 +1755,12 @@ func getServerInfo(ctx context.Context, r *http.Request) madmin.InfoMessage {
	return madmin.InfoMessage{
		Mode:         string(mode),
		Domain:       domain,
		Region:       globalServerRegion,
		Region:       globalSite.Region,
		SQSARN:       globalNotificationSys.GetARNList(false),
		DeploymentID: globalDeploymentID,
		Buckets:      buckets,
		Objects:      objects,
		Versions:     versions,
		Usage:        usage,
		Services:     services,
		Backend:      backend,
@@ -1477,7 +1782,14 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
	}

	query := r.Form
	healthInfo := madmin.HealthInfo{Version: madmin.HealthInfoVersion}
	healthInfo := madmin.HealthInfo{
		Version: madmin.HealthInfoVersion,
		Minio: madmin.MinioHealthInfo{
			Info: madmin.MinioInfo{
				DeploymentID: globalDeploymentID,
			},
		},
	}
	healthInfoCh := make(chan madmin.HealthInfo)

	enc := json.NewEncoder(w)
@@ -1499,7 +1811,12 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
		logger.LogIf(ctx, enc.Encode(healthInfo))
	}

	deadline := 1 * time.Hour
	deadline := 10 * time.Second // Default deadline is 10secs for health diagnostics.
	if query.Get(string(madmin.HealthDataTypePerfNet)) != "" ||
		query.Get(string(madmin.HealthDataTypePerfDrive)) != "" ||
		query.Get(string(madmin.HealthDataTypePerfObj)) != "" {
		deadline = 1 * time.Hour
	}
	if dstr := r.Form.Get("deadline"); dstr != "" {
		var err error
		deadline, err = time.ParseDuration(dstr)
@@ -1650,8 +1967,8 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
	anonymizeCmdLine := func(cmdLine string) string {
		if !globalIsDistErasure {
			// FS mode - single server - hard code to `server1`
			anonCmdLine := strings.Replace(cmdLine, globalLocalNodeName, "server1", -1)
			return strings.Replace(anonCmdLine, globalMinioConsoleHost, "server1", -1)
			anonCmdLine := strings.ReplaceAll(cmdLine, globalLocalNodeName, "server1")
			return strings.ReplaceAll(anonCmdLine, globalMinioConsoleHost, "server1")
		}

		// Server start command regex groups:
@@ -1742,49 +2059,98 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
	}

	getAndWriteDrivePerfInfo := func() {
		if query.Get("perfdrive") == "true" {
			localDPI := getDrivePerfInfos(deadlinedCtx, globalLocalNodeName)
			anonymizeAddr(&localDPI)
			healthInfo.Perf.Drives = append(healthInfo.Perf.Drives, localDPI)
			partialWrite(healthInfo)
		if query.Get(string(madmin.HealthDataTypePerfDrive)) == "true" {
			// Freeze all incoming S3 API calls before running speedtest.
			globalNotificationSys.ServiceFreeze(ctx, true)
			// unfreeze all incoming S3 API calls after speedtest.
			defer globalNotificationSys.ServiceFreeze(ctx, false)

			perfCh := globalNotificationSys.GetDrivePerfInfos(deadlinedCtx)
			for perfInfo := range perfCh {
				anonymizeAddr(&perfInfo)
				healthInfo.Perf.Drives = append(healthInfo.Perf.Drives, perfInfo)
				partialWrite(healthInfo)
			opts := madmin.DriveSpeedTestOpts{
				Serial:    false,
				BlockSize: 4 * humanize.MiByte,
				FileSize:  1 * humanize.GiByte,
			}

			localDPI := driveSpeedTest(ctx, opts)
			healthInfo.Perf.DrivePerf = append(healthInfo.Perf.DrivePerf, localDPI)

			perfCh := globalNotificationSys.DriveSpeedTest(ctx, opts)
			for perfInfo := range perfCh {
				healthInfo.Perf.DrivePerf = append(healthInfo.Perf.DrivePerf, perfInfo)
			}
			partialWrite(healthInfo)
		}
	}

	anonymizeNetPerfInfo := func(npi *madmin.NetPerfInfo) {
		anonymizeAddr(npi)
		rps := npi.RemotePeers
		for idx, peer := range rps {
			anonymizeAddr(&peer)
			rps[idx] = peer
	getAndWriteObjPerfInfo := func() {
		if query.Get(string(madmin.HealthDataTypePerfObj)) == "true" {
			concurrent := 32
			size := 64 * humanize.MiByte
			autotune := true

			sufficientCapacity, canAutotune, capacityErrMsg := validateObjPerfOptions(ctx, objectAPI, concurrent, size, autotune)

			if !sufficientCapacity {
				healthInfo.Perf.Error = capacityErrMsg
				partialWrite(healthInfo)
				return
			}

			if !canAutotune {
				autotune = false
			}

			bucketExists, err := makeObjectPerfBucket(ctx, objectAPI)
			if err != nil {
				healthInfo.Perf.Error = "Could not make object perf bucket: " + err.Error()
				partialWrite(healthInfo)
				return
			}

			if !bucketExists {
				defer deleteObjectPerfBucket(objectAPI)
			}

			// Freeze all incoming S3 API calls before running speedtest.
			globalNotificationSys.ServiceFreeze(ctx, true)

			// unfreeze all incoming S3 API calls after speedtest.
			defer globalNotificationSys.ServiceFreeze(ctx, false)

			opts := speedTestOpts{
				throughputSize:   size,
				concurrencyStart: concurrent,
				duration:         10 * time.Second,
				autotune:         autotune,
			}

			perfCh := objectSpeedTest(ctx, opts)
			for perfInfo := range perfCh {
				healthInfo.Perf.ObjPerf = append(healthInfo.Perf.ObjPerf, perfInfo)
			}
			partialWrite(healthInfo)
		}
		npi.RemotePeers = rps
	}

	getAndWriteNetPerfInfo := func() {
		if globalIsDistErasure && query.Get("perfnet") == "true" {
			localNPI := globalNotificationSys.GetNetPerfInfo(deadlinedCtx)
			anonymizeNetPerfInfo(&localNPI)
			healthInfo.Perf.Net = append(healthInfo.Perf.Net, localNPI)

			partialWrite(healthInfo)

			netInfos := globalNotificationSys.DispatchNetPerfChan(deadlinedCtx)
			for netInfo := range netInfos {
				anonymizeNetPerfInfo(&netInfo)
				healthInfo.Perf.Net = append(healthInfo.Perf.Net, netInfo)
				partialWrite(healthInfo)
		if query.Get(string(madmin.HealthDataTypePerfNet)) == "true" {
			if !globalIsDistErasure {
				return
			}

			ppi := globalNotificationSys.GetParallelNetPerfInfo(deadlinedCtx)
			anonymizeNetPerfInfo(&ppi)
			healthInfo.Perf.NetParallel = ppi
			nsLock := objectAPI.NewNSLock(minioMetaBucket, "netperf")
			lkctx, err := nsLock.GetLock(ctx, globalOperationTimeout)
			if err != nil {
				healthInfo.Perf.Error = "Could not acquire lock for netperf: " + err.Error()
			} else {
				defer nsLock.Unlock(lkctx.Cancel)

				netPerf := globalNotificationSys.Netperf(ctx, time.Second*10)
				for _, np := range netPerf {
					np.Endpoint = anonAddr(np.Endpoint)
					healthInfo.Perf.NetPerf = append(healthInfo.Perf.NetPerf, np)
				}
			}
			partialWrite(healthInfo)
		}
	}
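Each perf probe brackets its work in ServiceFreeze(true) / `defer ServiceFreeze(false)`, so S3 traffic resumes on every exit path, including the early error returns above. If one wanted to factor the bracket out, a hypothetical helper (not part of this patch) would look like:

// withFrozenS3 runs fn with incoming S3 calls frozen; hypothetical helper.
func withFrozenS3(ctx context.Context, fn func()) {
	globalNotificationSys.ServiceFreeze(ctx, true)
	defer globalNotificationSys.ServiceFreeze(ctx, false)
	fn()
}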
@@ -1796,7 +2162,6 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
			anonNetwork[anonEndpoint] = status
		}
		return anonNetwork

	}

	anonymizeDrives := func(drives []madmin.Disk) []madmin.Disk {
@@ -1811,7 +2176,7 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
	go func() {
		defer close(healthInfoCh)

		partialWrite(healthInfo) // Write first message with only version populated
		partialWrite(healthInfo) // Write first message with only version and deployment id populated
		getAndWriteCPUs()
		getAndWritePartitions()
		getAndWriteOSInfo()
@@ -1819,6 +2184,7 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
		getAndWriteProcInfo()
		getAndWriteMinioConfig()
		getAndWriteDrivePerfInfo()
		getAndWriteObjPerfInfo()
		getAndWriteNetPerfInfo()
		getAndWriteSysErrors()
		getAndWriteSysServices()
@@ -1848,6 +2214,9 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
		})
	}

	tls := getTLSInfo()
	isK8s := IsKubernetes()
	isDocker := IsDocker()
	healthInfo.Minio.Info = madmin.MinioInfo{
		Mode:   infoMessage.Mode,
		Domain: infoMessage.Domain,
@@ -1860,7 +2229,9 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
		Services:     infoMessage.Services,
		Backend:      infoMessage.Backend,
		Servers:      servers,
		TLS:          getTLSInfo(),
		TLS:          &tls,
		IsKubernetes: &isK8s,
		IsDocker:     &isDocker,
	}
	partialWrite(healthInfo)
}
@@ -1875,19 +2246,22 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
			if !ok {
				return
			}
			logger.LogIf(ctx, enc.Encode(oinfo))
			w.(http.Flusher).Flush()
			if err := enc.Encode(oinfo); err != nil {
				return
			}
			if len(healthInfoCh) == 0 {
				// Flush if nothing is queued
				w.(http.Flusher).Flush()
			}
		case <-ticker.C:
			if _, err := w.Write([]byte(" ")); err != nil {
				return
			}
			w.(http.Flusher).Flush()
		case <-deadlinedCtx.Done():
			w.(http.Flusher).Flush()
			return
		}
	}

}

func getTLSInfo() madmin.TLSInfo {
@@ -1943,17 +2317,21 @@ func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http
			}
		}
	}()

	enc := json.NewEncoder(w)
	for {
		select {
		case report, ok := <-reportCh:
			if !ok {
				return
			}
			if err := json.NewEncoder(w).Encode(report); err != nil {
				writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
			if err := enc.Encode(report); err != nil {
				return
			}
			w.(http.Flusher).Flush()
			if len(reportCh) == 0 {
				// Flush if nothing is queued
				w.(http.Flusher).Flush()
			}
		case <-keepAliveTicker.C:
			if _, err := w.Write([]byte(" ")); err != nil {
				return
@@ -2009,7 +2387,6 @@ func assignPoolNumbers(servers []madmin.ServerProperties) {
}

func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {

	lambdaMap := make(map[string][]madmin.TargetIDStatus)

	for _, tgt := range globalConfigTargetList.Targets() {
@@ -2096,7 +2473,7 @@ func fetchKMSStatus() madmin.KMS {
func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
	var loggerInfo []madmin.Logger
	var auditloggerInfo []madmin.Audit
	for _, target := range logger.Targets {
	for _, target := range logger.SystemTargets() {
		if target.Endpoint() != "" {
			tgt := target.String()
			err := checkConnection(target.Endpoint(), 15*time.Second)
@@ -2112,7 +2489,7 @@ func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
		}
	}

	for _, target := range logger.AuditTargets {
	for _, target := range logger.AuditTargets() {
		if target.Endpoint() != "" {
			tgt := target.String()
			err := checkConnection(target.Endpoint(), 15*time.Second)
@@ -2136,19 +2513,9 @@ func checkConnection(endpointStr string, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(GlobalContext, timeout)
	defer cancel()

	client := &http.Client{Transport: &http.Transport{
		Proxy:                 http.ProxyFromEnvironment,
		DialContext:           xhttp.NewCustomDialContext(timeout),
		ResponseHeaderTimeout: 5 * time.Second,
		TLSHandshakeTimeout:   5 * time.Second,
		ExpectContinueTimeout: 5 * time.Second,
		TLSClientConfig:       &tls.Config{RootCAs: globalRootCAs},
		// Go net/http automatically unzip if content-type is
		// gzip disable this feature, as we are always interested
		// in raw stream.
		DisableCompression: true,
	}}
	defer client.CloseIdleConnections()
	client := &http.Client{
		Transport: globalProxyTransport,
	}
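The rewrite of checkConnection drops the per-call http.Transport, which paid dialer and TLS setup on every health probe and then discarded the connections, in favor of the server-wide globalProxyTransport. The general shape of the change, with a hypothetical shared transport configured once at startup:

var sharedTransport = &http.Transport{DisableCompression: true} // configured once, reused everywhere

func checkEndpoint(ctx context.Context, endpoint string) error {
	client := &http.Client{Transport: sharedTransport} // pooled connections survive across probes
	req, err := http.NewRequestWithContext(ctx, http.MethodHead, endpoint, nil)
	if err != nil {
		return err
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}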

	req, err := http.NewRequestWithContext(ctx, http.MethodHead, endpointStr, nil)
	if err != nil {
@@ -2241,8 +2608,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
	// of profiling data of all nodes
	zipWriter := zip.NewWriter(encw)
	defer zipWriter.Close()

	err = o.GetRawData(ctx, volume, file, func(r io.Reader, host, disk, filename string, si StatInfo) error {
	rawDataFn := func(r io.Reader, host, disk, filename string, si StatInfo) error {
		// Prefix host+disk
		filename = path.Join(host, disk, filename)
		if si.Dir {
@@ -2251,7 +2617,11 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
		}
		if si.Mode == 0 {
			// Not, set it to default.
			si.Mode = 0600
			si.Mode = 0o600
		}
		if si.ModTime.IsZero() {
			// Set time to now.
			si.ModTime = time.Now()
		}
		header, zerr := zip.FileInfoHeader(dummyFileInfo{
			name: filename,
@@ -2275,8 +2645,33 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
			logger.LogIf(ctx, err)
		}
		return nil
	})
	logger.LogIf(ctx, err)
}
	err = o.GetRawData(ctx, volume, file, rawDataFn)
	if !errors.Is(err, errFileNotFound) {
		logger.LogIf(ctx, err)
	}

	// save the format.json as part of inspect by default
	if volume != minioMetaBucket && file != formatConfigFile {
		err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
	}
	if !errors.Is(err, errFileNotFound) {
		logger.LogIf(ctx, err)
	}
	// save args passed to inspect command
	inspectArgs := []string{fmt.Sprintf(" Inspect path: %s%s%s\n", volume, slashSeparator, file)}
	cmdLine := []string{"Server command line args: "}
	for _, pool := range globalEndpoints {
		cmdLine = append(cmdLine, pool.CmdLine)
	}
	cmdLine = append(cmdLine, "\n")
	inspectArgs = append(inspectArgs, cmdLine...)
	inspectArgsBytes := []byte(strings.Join(inspectArgs, " "))
	if err = rawDataFn(bytes.NewReader(inspectArgsBytes), "", "", "inspect-input.txt", StatInfo{
		Size: int64(len(inspectArgsBytes)),
	}); err != nil {
		logger.LogIf(ctx, err)
	}
}

func createHostAnonymizerForFSMode() map[string]string {
@@ -2322,6 +2717,7 @@ func anonymizeHost(hostAnonymizer map[string]string, endpoint Endpoint, poolNum
	if !found {
		// In distributed setup, anonymized addr = 'poolNum.serverNum'
		newHost := fmt.Sprintf("pool%d.server%d", poolNum, srvrNum)
		schemePfx := endpoint.Scheme + "://"

		// Hostname
		mapIfNotPresent(hostAnonymizer, endpoint.Hostname(), newHost)
@@ -2331,6 +2727,7 @@ func anonymizeHost(hostAnonymizer map[string]string, endpoint Endpoint, poolNum
			// Host + port
			newHostPort = newHost + ":" + endpoint.Port()
			mapIfNotPresent(hostAnonymizer, endpoint.Host, newHostPort)
			mapIfNotPresent(hostAnonymizer, schemePfx+endpoint.Host, newHostPort)
		}

		newHostPortPath := newHostPort
@@ -2339,10 +2736,11 @@ func anonymizeHost(hostAnonymizer map[string]string, endpoint Endpoint, poolNum
			currentHostPortPath := endpoint.Host + endpoint.Path
			newHostPortPath = newHostPort + endpoint.Path
			mapIfNotPresent(hostAnonymizer, currentHostPortPath, newHostPortPath)
			mapIfNotPresent(hostAnonymizer, schemePfx+currentHostPortPath, newHostPortPath)
		}

		// Full url
		hostAnonymizer[currentURL] = endpoint.Scheme + "://" + newHostPortPath
		hostAnonymizer[currentURL] = schemePfx + newHostPortPath
	}
}
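The net effect of anonymizeHost is a lookup keyed by every spelling of an endpoint. For a hypothetical endpoint https://node3.example.com:9000/data1 assigned pool 1, server 2, the map would carry roughly:

hostAnonymizer := map[string]string{
	"node3.example.com":                    "pool1.server2",
	"node3.example.com:9000":               "pool1.server2:9000",
	"https://node3.example.com:9000":       "pool1.server2:9000",
	"node3.example.com:9000/data1":         "pool1.server2:9000/data1",
	"https://node3.example.com:9000/data1": "https://pool1.server2:9000/data1", // only the full URL keeps its scheme
}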

@@ -28,6 +28,7 @@ import (
	"net/url"
	"sync"
	"testing"
	"time"

	"github.com/gorilla/mux"
	"github.com/minio/madmin-go"
@@ -40,11 +41,13 @@ type adminErasureTestBed struct {
	erasureDirs []string
	objLayer    ObjectLayer
	router      *mux.Router
	done        context.CancelFunc
}

// prepareAdminErasureTestBed - helper function that setups a single-node
// Erasure backend for admin-handler tests.
func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, error) {
	ctx, cancel := context.WithCancel(ctx)

	// reset global variables to start afresh.
	resetTestGlobals()
@@ -56,11 +59,13 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro
	// Initializing objectLayer for HealFormatHandler.
	objLayer, erasureDirs, xlErr := initTestErasureObjLayer(ctx)
	if xlErr != nil {
		cancel()
		return nil, xlErr
	}

	// Initialize minio server config.
	if err := newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
		cancel()
		return nil, err
	}

@@ -69,11 +74,11 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro

	globalEndpoints = mustGetPoolEndpoints(erasureDirs...)

	newAllSubsystems()
	initAllSubsystems()

	initAllSubsystems(ctx, objLayer)
	initConfigSubsystem(ctx, objLayer)

	globalIAMSys.InitStore(objLayer, globalEtcdClient)
	globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)

	// Setup admin mgmt REST API handlers.
	adminRouter := mux.NewRouter()
@@ -83,12 +88,14 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro
		erasureDirs: erasureDirs,
		objLayer:    objLayer,
		router:      adminRouter,
		done:        cancel,
	}, nil
}

// TearDown - method that resets the test bed for subsequent unit
// tests to start afresh.
func (atb *adminErasureTestBed) TearDown() {
	atb.done()
	removeRoots(atb.erasureDirs)
	resetTestGlobals()
}
@@ -229,8 +236,8 @@ func TestServiceRestartHandler(t *testing.T) {

// buildAdminRequest - helper function to build an admin API request.
func buildAdminRequest(queryVal url.Values, method, path string,
	contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error) {

	contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error,
) {
	req, err := newTestRequest(method,
		adminPathPrefix+adminAPIVersionPrefix+path+"?"+queryVal.Encode(),
		contentLength, bodySeeker)
@@ -373,5 +380,4 @@ func TestExtractHealInitParams(t *testing.T) {
		}
	}
}

}
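With the test bed now owning a CancelFunc, a typical admin-handler test wires setup and teardown like this (sketch; the test name is illustrative):

func TestSomeAdminHandler(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	atb, err := prepareAdminErasureTestBed(ctx)
	if err != nil {
		t.Fatalf("failed to set up admin test bed: %v", err)
	}
	defer atb.TearDown() // cancels the inner context and removes the erasure dirs

	// ... drive requests through atb.router ...
}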

@@ -278,8 +278,8 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
// background routine to clean up heal results after the
// aforementioned duration.
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLayer) (
	respBytes []byte, apiErr APIError, errMsg string) {

	respBytes []byte, apiErr APIError, errMsg string,
) {
	if h.forceStarted {
		_, apiErr = ahs.stopHealSequence(pathJoin(h.bucket, h.object))
		if apiErr.Code != "" {
@@ -338,8 +338,8 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay
// representation. The clientToken helps ensure there aren't
// conflicting clients fetching status.
func (ahs *allHealState) PopHealStatusJSON(hpath string,
	clientToken string) ([]byte, APIErrorCode) {

	clientToken string) ([]byte, APIErrorCode,
) {
	// fetch heal state for given path
	h, exists := ahs.getHealSequence(hpath)
	if !exists {
@@ -453,8 +453,8 @@ type healSequence struct {
// NewHealSequence - creates healSettings, assumes bucket and
// objPrefix are already validated.
func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
	hs madmin.HealOpts, forceStart bool) *healSequence {

	hs madmin.HealOpts, forceStart bool,
) *healSequence {
	reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
	reqInfo.AppendTags("prefix", objPrefix)
	ctx, cancel := context.WithCancel(logger.SetReqInfo(ctx, reqInfo))
@@ -491,7 +491,7 @@ func (h *healSequence) getScannedItemsCount() int64 {
	defer h.mutex.RUnlock()

	for _, v := range h.scannedItemsMap {
		count = count + v
		count += v
	}
	return count
}
@@ -700,8 +700,9 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
	}
	if source.opts != nil {
		task.opts = *source.opts
	} else {
		task.opts.ScanMode = madmin.HealNormalScan
	}
	task.opts.ScanMode = globalHealConfig.ScanMode()

	h.mutex.Lock()
	h.scannedItemsMap[healType]++
@@ -710,6 +711,9 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem

	select {
	case globalBackgroundHealRoutine.tasks <- task:
		if serverDebugLog {
			logger.Info("Task in the queue: %#v", task)
		}
	case <-h.ctx.Done():
		return nil
	}
@@ -885,6 +889,7 @@ func (h *healSequence) healObject(bucket, object, versionID string) error {
		bucket:    bucket,
		object:    object,
		versionID: versionID,
		opts:      &h.settings,
	}, madmin.HealItemObject)

	// Wait and proceed if there are active requests
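Note the queueHealTask change above: the scan mode is no longer a per-request default (HealNormalScan when the caller passed no options) but is always stamped from the heal configuration, making scan depth a single runtime setting:

	if source.opts != nil {
		task.opts = *source.opts
	}
	// The configured mode (e.g. normal vs deep) now wins for every queued task.
	task.opts.ScanMode = globalHealConfig.ScanMode()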

@@ -38,13 +38,10 @@ type adminAPIHandlers struct{}

// registerAdminRouter - Add handler functions for each service REST API routes.
func registerAdminRouter(router *mux.Router, enableConfigOps bool) {

	adminAPI := adminAPIHandlers{}
	// Admin router
	adminRouter := router.PathPrefix(adminPathPrefix).Subrouter()

	/// Service operations

	adminVersions := []string{
		adminAPIVersionPrefix,
	}
@@ -71,23 +68,28 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
	adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(gz(httpTraceAll(adminAPI.DataUsageInfoHandler)))

	if globalIsDistErasure || globalIsErasure {
		/// Heal operations
		// Heal operations

		// Heal processing endpoint.
		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))
		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))
		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))

		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(gz(httpTraceAll(adminAPI.BackgroundHealStatusHandler)))

		/// Health operations
		// Pool operations
		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/pools/list").HandlerFunc(gz(httpTraceAll(adminAPI.ListPools)))
		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/pools/status").HandlerFunc(gz(httpTraceAll(adminAPI.StatusPool))).Queries("pool", "{pool:.*}")

		adminRouter.Methods(http.MethodPost).Path(adminVersion+"/pools/decommission").HandlerFunc(gz(httpTraceAll(adminAPI.StartDecommission))).Queries("pool", "{pool:.*}")
		adminRouter.Methods(http.MethodPost).Path(adminVersion+"/pools/cancel").HandlerFunc(gz(httpTraceAll(adminAPI.CancelDecommission))).Queries("pool", "{pool:.*}")
	}

	// Profiling operations
	// Profiling operations - deprecated API
	adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(gz(httpTraceAll(adminAPI.StartProfilingHandler))).
		Queries("profilerType", "{profilerType:.*}")
	adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(gz(httpTraceAll(adminAPI.DownloadProfilingHandler)))
	// Profiling operations
	adminRouter.Methods(http.MethodPost).Path(adminVersion + "/profile").HandlerFunc(gz(httpTraceAll(adminAPI.ProfileHandler)))

	// Config KV operations.
	if enableConfigOps {
@@ -106,7 +108,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
		adminRouter.Methods(http.MethodPut).Path(adminVersion+"/restore-config-history-kv").HandlerFunc(gz(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler))).Queries("restoreId", "{restoreId:.*}")
	}

	/// Config import/export bulk operations
	// Config import/export bulk operations
	if enableConfigOps {
		// Get config
		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/config").HandlerFunc(gz(httpTraceHdrs(adminAPI.GetConfigHandler)))
@@ -191,16 +193,26 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.AddTierHandler)))
		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.EditTierHandler)))
		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListTierHandler)))
		adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.RemoveTierHandler)))
		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.VerifyTierHandler)))
		// Tier stats
		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier-stats").HandlerFunc(gz(httpTraceHdrs(adminAPI.TierStatsHandler)))

		// Cluster Replication APIs
		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/add").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationAdd)))
		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/disable").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationDisable)))
		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/remove").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationRemove)))
		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/info").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationInfo)))
		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/join").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRInternalJoin)))
		adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/peer/bucket-ops").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRInternalBucketOps))).Queries("bucket", "{bucket:.*}").Queries("operation", "{operation:.*}")
		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/iam-item").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRInternalReplicateIAMItem)))
		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/bucket-meta").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRInternalReplicateBucketItem)))
		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/peer/idp-settings").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRInternalGetIDPSettings)))
		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/metainfo").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationMetaInfo)))
		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/status").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationStatus)))

		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/join").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerJoin)))
		adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/peer/bucket-ops").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerBucketOps))).Queries("bucket", "{bucket:.*}").Queries("operation", "{operation:.*}")
		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/iam-item").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerReplicateIAMItem)))
		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/bucket-meta").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerReplicateBucketItem)))
		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/peer/idp-settings").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerGetIDPSettings)))
		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/edit").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationEdit)))
		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/edit").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerEdit)))
		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/remove").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerRemove)))
	}

	if globalIsDistErasure {
@@ -212,6 +224,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
	}

	adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest").HandlerFunc(httpTraceHdrs(adminAPI.SpeedtestHandler))
	adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/object").HandlerFunc(httpTraceHdrs(adminAPI.ObjectSpeedtestHandler))
	adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/drive").HandlerFunc(httpTraceHdrs(adminAPI.DriveSpeedtestHandler))
	adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/net").HandlerFunc(httpTraceHdrs(adminAPI.NetperfHandler))

	// HTTP Trace
	adminRouter.Methods(http.MethodGet).Path(adminVersion + "/trace").HandlerFunc(gz(http.HandlerFunc(adminAPI.TraceHandler)))
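The speedtest family is now split across dedicated admin paths. Assuming the v3 admin prefix and the query parameters read by the legacy handler above, the raw endpoints would look roughly like the following (illustrative only; real requests must be signed):

	POST /minio/admin/v3/speedtest?size=67108864&duration=10s&concurrent=32&autotune=true
	POST /minio/admin/v3/speedtest/object
	POST /minio/admin/v3/speedtest/drive?blocksize=4194304&filesize=1073741824&serial=false
	POST /minio/admin/v3/speedtest/net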

@@ -50,7 +50,7 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
	}
	_, present := network[nodeName]
	if !present {
		if err := isServerResolvable(endpoint, 2*time.Second); err == nil {
		if err := isServerResolvable(endpoint, 5*time.Second); err == nil {
			network[nodeName] = string(madmin.ItemOnline)
		} else {
			network[nodeName] = string(madmin.ItemOffline)

@@ -48,10 +48,15 @@ func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, startElement xml.StartElem
	return e.EncodeElement(t.Time.Format(time.RFC3339), startElement)
}

// ObjectToDelete carries key name for the object to delete.
type ObjectToDelete struct {
// ObjectV object version key/versionId
type ObjectV struct {
	ObjectName string `xml:"Key"`
	VersionID  string `xml:"VersionId"`
}

// ObjectToDelete carries key name for the object to delete.
type ObjectToDelete struct {
	ObjectV
	// Replication status of DeleteMarker
	DeleteMarkerReplicationStatus string `xml:"DeleteMarkerReplicationStatus"`
	// Status of versioned delete (of object or DeleteMarker)

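Factoring ObjectV out of ObjectToDelete lets other types reuse the key/versionId pair, while struct embedding keeps the XML shape identical - <Key> and <VersionId> still serialize at the same level. Minimal usage sketch (values are illustrative):

od := ObjectToDelete{
	ObjectV: ObjectV{
		ObjectName: "photos/cat.png",
		VersionID:  "e5b1d1f0-8f9a-4c1b-9f0e-2e7b6a1c9d3f", // illustrative version id
	},
}
// od.ObjectName and od.VersionID resolve through the embedded ObjectV.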
@@ -82,6 +82,7 @@ const (
	ErrIncompleteBody
	ErrInternalError
	ErrInvalidAccessKeyID
	ErrAccessKeyDisabled
	ErrInvalidBucketName
	ErrInvalidDigest
	ErrInvalidRange
@@ -109,6 +110,7 @@ const (
	ErrNoSuchBucketPolicy
	ErrNoSuchBucketLifecycle
	ErrNoSuchLifecycleConfiguration
	ErrInvalidLifecycleWithObjectLock
	ErrNoSuchBucketSSEConfig
	ErrNoSuchCORSConfiguration
	ErrNoSuchWebsiteConfiguration
@@ -128,6 +130,7 @@ const (
	ErrReplicationSourceNotVersionedError
	ErrReplicationNeedsVersioningError
	ErrReplicationBucketNeedsVersioningError
	ErrReplicationDenyEditError
	ErrReplicationNoMatchingRuleError
	ErrObjectRestoreAlreadyInProgress
	ErrNoSuchKey
@@ -209,6 +212,7 @@ const (
	ErrInvalidSSECustomerParameters
	ErrIncompatibleEncryptionMethod
	ErrKMSNotConfigured
	ErrKMSKeyNotFoundException

	ErrNoAccessKey
	ErrInvalidToken
@@ -395,10 +399,10 @@ func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError
	if err != nil {
		apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
	}
	if globalServerRegion != "" {
	if globalSite.Region != "" {
		switch errCode {
		case ErrAuthorizationHeaderMalformed:
			apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalServerRegion)
			apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
			return apiErr
		}
	}
@@ -512,6 +516,11 @@ var errorCodes = errorCodeMap{
		Description:    "The Access Key Id you provided does not exist in our records.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrAccessKeyDisabled: {
		Code:           "InvalidAccessKeyId",
		Description:    "Your account is disabled; please contact your administrator.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrInvalidBucketName: {
		Code:           "InvalidBucketName",
		Description:    "The specified bucket is not valid.",
@@ -577,6 +586,11 @@ var errorCodes = errorCodeMap{
		Description:    "The lifecycle configuration does not exist",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrInvalidLifecycleWithObjectLock: {
		Code:           "InvalidLifecycleWithObjectLock",
		Description:    "The lifecycle configuration containing MaxNoncurrentVersions is not supported with object locking",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrNoSuchBucketSSEConfig: {
		Code:           "ServerSideEncryptionConfigurationNotFoundError",
		Description:    "The server side encryption configuration was not found",
@@ -674,7 +688,7 @@ var errorCodes = errorCodeMap{
	},
	ErrAllAccessDisabled: {
		Code:           "AllAccessDisabled",
		Description:    "All access to this bucket has been disabled.",
		Description:    "All access to this resource has been disabled.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrMalformedPolicy: {
@@ -882,6 +896,11 @@ var errorCodes = errorCodeMap{
		Description:    "No matching replication rule found for this object prefix",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrReplicationDenyEditError: {
		Code:           "XMinioReplicationDenyEdit",
		Description:    "Cannot alter local replication config since this server is in a cluster replication setup",
		HTTPStatusCode: http.StatusConflict,
	},
	ErrBucketRemoteIdenticalToSource: {
		Code:           "XMinioAdminRemoteIdenticalToSource",
		Description:    "The remote target cannot be identical to source",
@@ -973,7 +992,7 @@ var errorCodes = errorCodeMap{
		HTTPStatusCode: http.StatusNotFound,
	},

	/// Bucket notification related errors.
	// Bucket notification related errors.
	ErrEventNotification: {
		Code:           "InvalidArgument",
		Description:    "A specified event is not supported for notifications.",
@@ -1109,6 +1128,11 @@ var errorCodes = errorCodeMap{
		Description:    "Server side encryption specified but KMS is not configured",
		HTTPStatusCode: http.StatusNotImplemented,
	},
	ErrKMSKeyNotFoundException: {
		Code:           "KMS.NotFoundException",
		Description:    "Invalid keyId",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrNoAccessKey: {
		Code:           "AccessDenied",
		Description:    "No AWSAccessKey was presented",
@@ -1120,14 +1144,14 @@ var errorCodes = errorCodeMap{
		HTTPStatusCode: http.StatusForbidden,
	},

	/// S3 extensions.
	// S3 extensions.
	ErrContentSHA256Mismatch: {
		Code:           "XAmzContentSHA256Mismatch",
		Description:    "The provided 'x-amz-content-sha256' header does not match what was computed.",
		HTTPStatusCode: http.StatusBadRequest,
	},

	/// MinIO extensions.
	// MinIO extensions.
	ErrStorageFull: {
		Code:           "XMinioStorageFull",
		Description:    "Storage backend has reached its minimum free disk threshold. Please delete a few objects to proceed.",
@@ -1362,15 +1386,15 @@ var errorCodes = errorCodeMap{
	},
	ErrBackendDown: {
		Code:           "XMinioBackendDown",
		Description:    "Object storage backend is unreachable",
		HTTPStatusCode: http.StatusServiceUnavailable,
		Description:    "Remote backend is unreachable",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrIncorrectContinuationToken: {
		Code:           "InvalidArgument",
		Description:    "The continuation token provided is incorrect",
		HTTPStatusCode: http.StatusBadRequest,
	},
	//S3 Select API Errors
	// S3 Select API Errors
	ErrEmptyRequestBody: {
		Code:           "EmptyRequestBody",
		Description:    "Request body cannot be empty.",
@@ -1894,6 +1918,9 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrIncompatibleEncryptionMethod
	case errKMSNotConfigured:
		apiErr = ErrKMSNotConfigured
	case errKMSKeyNotFound:
		apiErr = ErrKMSKeyNotFoundException

	case context.Canceled, context.DeadlineExceeded:
		apiErr = ErrOperationTimedOut
	case errDiskNotFound:
@@ -1956,6 +1983,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrNoSuchKey
	case MethodNotAllowed:
		apiErr = ErrMethodNotAllowed
	case ObjectLocked:
		apiErr = ErrObjectLocked
	case InvalidVersionID:
		apiErr = ErrInvalidVersionID
	case VersionNotFound:
@@ -2072,6 +2101,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
	default:
		var ie, iw int
		// This work-around is to handle the issue golang/go#30648
		//nolint:gocritic
		if _, ferr := fmt.Fscanf(strings.NewReader(err.Error()),
			"request declared a Content-Length of %d but only wrote %d bytes",
			&ie, &iw); ferr != nil {
@@ -2104,7 +2134,7 @@ func toAPIError(ctx context.Context, err error) APIError {
		return noError
	}

	var apiErr = errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
	apiErr := errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
	e, ok := err.(dns.ErrInvalidBucketName)
	if ok {
		code := toAPIErrorCode(ctx, e)
@@ -2127,6 +2157,11 @@ func toAPIError(ctx context.Context, err error) APIError {
		}
	}

	if apiErr.Code == "XMinioBackendDown" {
		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
		return apiErr
	}

	if apiErr.Code == "InternalError" {
		// If we see an internal error try to interpret
		// any underlying errors if possible depending on
@@ -2212,7 +2247,6 @@ func toAPIError(ctx context.Context, err error) APIError {
			// since S3 only sends one Error XML response.
			if len(e.Errors) >= 1 {
				apiErr.Code = e.Errors[0].Reason

			}
		case azblob.StorageError:
			apiErr = APIError{
@@ -2222,6 +2256,7 @@ func toAPIError(ctx context.Context, err error) APIError {
			}
		// Add more Gateway SDKs here if any in future.
		default:
			//nolint:gocritic
			if errors.Is(err, errMalformedEncoding) {
				apiErr = APIError{
					Code: "BadRequest",
@@ -2271,7 +2306,7 @@ func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID,
		BucketName: reqInfo.BucketName,
		Key:        reqInfo.ObjectName,
		Resource:   resource,
		Region:     globalServerRegion,
		Region:     globalSite.Region,
		RequestID:  requestID,
		HostID:     hostID,
	}

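globalServerRegion is replaced throughout by globalSite.Region, folding the region into a single site-wide configuration value. The shape of such a consolidation is sketched below; only the Region field is attested by this diff, the rest is illustrative:

type siteConfig struct {
	Name   string // illustrative field
	Region string
}

var globalSite siteConfig // read wherever globalServerRegion used to be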
||||
@@ -52,7 +52,7 @@ func setCommonHeaders(w http.ResponseWriter) {
|
||||
|
||||
// Set `x-amz-bucket-region` only if region is set on the server
|
||||
// by default minio uses an empty region.
|
||||
if region := globalServerRegion; region != "" {
|
||||
if region := globalSite.Region; region != "" {
|
||||
w.Header().Set(xhttp.AmzBucketRegion, region)
|
||||
}
|
||||
w.Header().Set(xhttp.AcceptRanges, "bytes")
|
||||
@@ -126,6 +126,11 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
|
||||
|
||||
// Set all other user defined metadata.
|
||||
for k, v := range objInfo.UserDefined {
|
||||
// Empty values for object lock and retention can be skipped.
|
||||
if v == "" && equals(k, xhttp.AmzObjectLockMode, xhttp.AmzObjectLockRetainUntilDate) {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
|
||||
// Do not need to send any internal metadata
|
||||
// values to client.
|
||||
|
||||
@@ -23,7 +23,7 @@ import (
|
||||
|
||||
func TestNewRequestID(t *testing.T) {
|
||||
// Ensure that it returns an alphanumeric result of length 16.
|
||||
var id = mustGetRequestID(UTCNow())
|
||||
id := mustGetRequestID(UTCNow())
|
||||
|
||||
if len(id) != 16 {
|
||||
t.Fail()
|
||||
|
||||
@@ -268,7 +268,6 @@ type StringMap map[string]string
|
||||
|
||||
// MarshalXML - StringMap marshals into XML.
|
||||
func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
|
||||
tokens := []xml.Token{start}
|
||||
|
||||
for key, value := range s {
|
||||
@@ -417,8 +416,8 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
|
||||
// serialized to match XML and JSON API spec output.
|
||||
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
|
||||
listbuckets := make([]Bucket, 0, len(buckets))
|
||||
var data = ListBucketsResponse{}
|
||||
var owner = Owner{
|
||||
data := ListBucketsResponse{}
|
||||
owner := Owner{
|
||||
ID: globalMinioDefaultOwnerID,
|
||||
DisplayName: "minio",
|
||||
}
|
||||
@@ -439,14 +438,14 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
|
||||
// generates an ListBucketVersions response for the said bucket with other enumerated options.
|
||||
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
|
||||
versions := make([]ObjectVersion, 0, len(resp.Objects))
|
||||
var owner = Owner{
|
||||
owner := Owner{
|
||||
ID: globalMinioDefaultOwnerID,
|
||||
DisplayName: "minio",
|
||||
}
|
||||
var data = ListVersionsResponse{}
|
||||
data := ListVersionsResponse{}
|
||||
|
||||
for _, object := range resp.Objects {
|
||||
var content = ObjectVersion{}
|
||||
content := ObjectVersion{}
|
||||
if object.Name == "" {
|
||||
continue
|
||||
}
|
||||
@@ -486,7 +485,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
|
||||
|
||||
prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
|
||||
for _, prefix := range resp.Prefixes {
|
||||
var prefixItem = CommonPrefix{}
|
||||
prefixItem := CommonPrefix{}
|
||||
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
|
||||
prefixes = append(prefixes, prefixItem)
|
||||
}
|
||||
@@ -497,14 +496,14 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
|
||||
// generates an ListObjectsV1 response for the said bucket with other enumerated options.
|
||||
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
|
||||
contents := make([]Object, 0, len(resp.Objects))
|
||||
var owner = Owner{
|
||||
owner := Owner{
|
||||
ID: globalMinioDefaultOwnerID,
|
||||
DisplayName: "minio",
|
||||
}
|
||||
var data = ListObjectsResponse{}
|
||||
data := ListObjectsResponse{}
|
||||
|
||||
for _, object := range resp.Objects {
|
||||
var content = Object{}
|
||||
content := Object{}
|
||||
if object.Name == "" {
|
||||
continue
|
||||
}
|
||||
@@ -535,7 +534,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
|
||||
|
||||
prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
|
||||
for _, prefix := range resp.Prefixes {
|
||||
var prefixItem = CommonPrefix{}
|
||||
prefixItem := CommonPrefix{}
|
||||
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
|
||||
prefixes = append(prefixes, prefixItem)
|
||||
}
|
||||
@@ -546,14 +545,14 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
|
||||
// generates an ListObjectsV2 response for the said bucket with other enumerated options.
|
||||
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
|
||||
contents := make([]Object, 0, len(objects))
|
||||
var owner = Owner{
|
||||
owner := Owner{
|
||||
ID: globalMinioDefaultOwnerID,
|
||||
DisplayName: "minio",
|
||||
}
|
||||
var data = ListObjectsV2Response{}
|
||||
data := ListObjectsV2Response{}
|
||||
|
||||
for _, object := range objects {
|
||||
var content = Object{}
|
||||
content := Object{}
|
||||
if object.Name == "" {
|
||||
continue
|
||||
}
|
||||
@@ -608,7 +607,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
|
||||
|
||||
commonPrefixes := make([]CommonPrefix, 0, len(prefixes))
|
||||
for _, prefix := range prefixes {
|
||||
var prefixItem = CommonPrefix{}
|
||||
prefixItem := CommonPrefix{}
|
||||
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
|
||||
commonPrefixes = append(commonPrefixes, prefixItem)
|
||||
}
|
||||
@@ -724,9 +723,6 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, err
|
||||
if !quiet {
|
||||
deleteResp.DeletedObjects = deletedObjects
|
||||
}
|
||||
if len(errs) == len(deletedObjects) {
|
||||
deleteResp.DeletedObjects = nil
|
||||
}
|
||||
deleteResp.Errors = errs
|
||||
return deleteResp
|
||||
}
|
||||
@@ -740,7 +736,6 @@ func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType
|
||||
w.WriteHeader(statusCode)
|
||||
if response != nil {
|
||||
w.Write(response)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -791,9 +786,9 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
|
||||
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
|
||||
w.Header().Set(xhttp.RetryAfter, "120")
|
||||
case "InvalidRegion":
|
||||
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalServerRegion)
|
||||
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region)
|
||||
case "AuthorizationHeaderMalformed":
|
||||
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalServerRegion)
|
||||
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
|
||||
}
|
||||
|
||||
// Generate error response.
|
||||
@@ -825,8 +820,8 @@ func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIE
|
||||
// but accepts the error message directly (this allows messages to be
|
||||
// dynamically generated.)
|
||||
func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError,
|
||||
errBody string, reqURL *url.URL) {
|
||||
|
||||
errBody string, reqURL *url.URL,
|
||||
) {
|
||||
reqInfo := logger.GetReqInfo(ctx)
|
||||
errorResponse := APIErrorResponse{
|
||||
Code: err.Code,
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/klauspost/compress/gzhttp"
|
||||
"github.com/minio/console/restapi"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/wildcard"
|
||||
@@ -42,6 +43,18 @@ func setHTTPServer(h *xhttp.Server) {
|
||||
globalObjLayerMutex.Unlock()
|
||||
}
|
||||
|
||||
func newConsoleServerFn() *restapi.Server {
|
||||
globalObjLayerMutex.RLock()
|
||||
defer globalObjLayerMutex.RUnlock()
|
||||
return globalConsoleSrv
|
||||
}
|
||||
|
||||
func setConsoleSrv(srv *restapi.Server) {
|
||||
globalObjLayerMutex.Lock()
|
||||
globalConsoleSrv = srv
|
||||
globalObjLayerMutex.Unlock()
|
||||
}
|
||||
|
||||
func newObjectLayerFn() ObjectLayer {
|
||||
globalObjLayerMutex.RLock()
|
||||
defer globalObjLayerMutex.RUnlock()
|
||||
@@ -274,7 +287,7 @@ func registerAPIRouter(router *mux.Router) {
collectAPIStats("getobjectlegalhold", maxClients(gz(httpTraceAll(api.GetObjectLegalHoldHandler))))).Queries("legal-hold", "")
// GetObject - note gzip compression is *not* added due to Range requests.
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobject", maxClients(httpTraceHdrs(api.GetObjectHandler))))
collectAPIStats("getobject", maxClients(gz(httpTraceHdrs(api.GetObjectHandler)))))
// CopyObject
router.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(
collectAPIStats("copyobject", maxClients(gz(httpTraceAll(api.CopyObjectHandler)))))
@@ -301,7 +314,8 @@ func registerAPIRouter(router *mux.Router) {
router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("restoreobject", maxClients(gz(httpTraceAll(api.PostRestoreObjectHandler))))).Queries("restore", "")

/// Bucket operations
// Bucket operations

// GetBucketLocation
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketlocation", maxClients(gz(httpTraceAll(api.GetBucketLocationHandler))))).Queries("location", "")
@@ -329,6 +343,9 @@ func registerAPIRouter(router *mux.Router) {
// ListenNotification
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listennotification", maxClients(gz(httpTraceAll(api.ListenNotificationHandler))))).Queries("events", "{events:.*}")
// ResetBucketReplicationStatus - MinIO extension API
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("resetbucketreplicationstatus", maxClients(gz(httpTraceAll(api.ResetBucketReplicationStatusHandler))))).Queries("replication-reset-status", "")

// Dummy Bucket Calls
// GetBucketACL -- this is a dummy call.
@@ -355,7 +372,7 @@ func registerAPIRouter(router *mux.Router) {
// GetBucketTaggingHandler
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbuckettagging", maxClients(gz(httpTraceAll(api.GetBucketTaggingHandler))))).Queries("tagging", "")
//DeleteBucketWebsiteHandler
// DeleteBucketWebsiteHandler
router.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketwebsite", maxClients(gz(httpTraceAll(api.DeleteBucketWebsiteHandler))))).Queries("website", "")
// DeleteBucketTaggingHandler
@@ -403,9 +420,10 @@ func registerAPIRouter(router *mux.Router) {
// PutBucketNotification
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketnotification", maxClients(gz(httpTraceAll(api.PutBucketNotificationHandler))))).Queries("notification", "")
// ResetBucketReplicationState - MinIO extension API
// ResetBucketReplicationStart - MinIO extension API
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("resetbucketreplicationstate", maxClients(gz(httpTraceAll(api.ResetBucketReplicationStateHandler))))).Queries("replication-reset", "")
collectAPIStats("resetbucketreplicationstart", maxClients(gz(httpTraceAll(api.ResetBucketReplicationStartHandler))))).Queries("replication-reset", "")

// PutBucket
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucket", maxClients(gz(httpTraceAll(api.PutBucketHandler)))))
@@ -452,7 +470,7 @@ func registerAPIRouter(router *mux.Router) {
collectAPIStats("listobjectsv1", maxClients(gz(httpTraceAll(api.ListObjectsV1Handler)))))
}

/// Root operation
// Root operation

// ListenNotification
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
@@ -470,7 +488,6 @@ func registerAPIRouter(router *mux.Router) {
// If none of the routes match add default error handler routes
apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))
apiRouter.MethodNotAllowedHandler = collectAPIStats("methodnotallowed", httpTraceAll(methodNotAllowedHandler("S3")))

}

// corsHandler handler for CORS (Cross Origin Resource Sharing)

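Each registration above composes the same middleware chain inside-out; conceptually, and assuming the wrapper signatures are mutually compatible handler transformers (a sketch):

	// Conceptual wrapper order for one route in registerAPIRouter.
	h := httpTraceHdrs(api.GetObjectHandler) // tracing sits closest to the handler
	h = gz(h)                                // gzip response compression
	h = maxClients(h)                        // server-wide concurrency cap
	h = collectAPIStats("getobject", h)      // per-API request metrics
	router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(h)

The hunk at -274 adds gz() to GetObject specifically, which is why the old and new registration lines differ only by that wrapper.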
@@ -44,7 +44,6 @@ func TestS3EncodeName(t *testing.T) {
if testCase.expectedOutput != outputText {
t.Errorf("Expected `%s`, got `%s`", testCase.expectedOutput, outputText)
}

})
}
}

File diff suppressed because one or more lines are too long
@@ -197,13 +197,7 @@ func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
return claims
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(token string) (map[string]interface{}, error) {
if token == "" {
claims := xjwt.NewMapClaims()
return claims.Map(), nil
}

func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{}, error) {
// JWT token for x-amz-security-token is signed with admin
// secret key, temporary credentials become invalid if
// server admin credentials change. This is done to ensure
@@ -212,9 +206,15 @@ func getClaimsFromToken(token string) (map[string]interface{}, error) {
// hijacking the policies. We need to make sure that this is
// based an admin credential such that token cannot be decoded
// on the client side and is treated like an opaque value.
claims, err := auth.ExtractClaims(token, globalActiveCred.SecretKey)
claims, err := auth.ExtractClaims(token, secret)
if err != nil {
return nil, errAuthentication
if subtle.ConstantTimeCompare([]byte(secret), []byte(globalActiveCred.SecretKey)) == 1 {
return nil, errAuthentication
}
claims, err = auth.ExtractClaims(token, globalActiveCred.SecretKey)
if err != nil {
return nil, errAuthentication
}
}

// If OPA is set, return without any further checks.
@@ -238,39 +238,53 @@ func getClaimsFromToken(token string) (map[string]interface{}, error) {
claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
}

// If LDAP claim key is set, return here.
if _, ok := claims.MapClaims[ldapUser]; ok {
return claims.Map(), nil
}

// Session token must have a policy, reject requests without policy
// claim.
_, pokOpenID := claims.MapClaims[iamPolicyClaimNameOpenID()]
_, pokSA := claims.MapClaims[iamPolicyClaimNameSA()]
if !pokOpenID && !pokSA {
return nil, errAuthentication
}

return claims.Map(), nil
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(token string) (map[string]interface{}, error) {
return getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
}

// Fetch claims in the security token returned by the client and validate the token.
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
token := getSessionToken(r)
if token != "" && cred.AccessKey == "" {
// x-amz-security-token is not allowed for anonymous access.
return nil, ErrNoAccessKey
}
if cred.IsServiceAccount() && token == "" {
token = cred.SessionToken
}
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {

if token == "" && cred.IsTemp() {
// Temporary credentials should always have x-amz-security-token
return nil, ErrInvalidToken
}
claims, err := getClaimsFromToken(token)
if err != nil {
return nil, toAPIErrorCode(r.Context(), err)

if token != "" && !cred.IsTemp() {
// x-amz-security-token should not present for static credentials.
return nil, ErrInvalidToken
}
return claims, ErrNone

if cred.IsTemp() && subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
// validate token for temporary credentials only.
return nil, ErrInvalidToken
}

secret := globalActiveCred.SecretKey
if cred.IsServiceAccount() {
token = cred.SessionToken
secret = cred.SecretKey
}

if token != "" {
claims, err := getClaimsFromTokenWithSecret(token, secret)
if err != nil {
return nil, toAPIErrorCode(r.Context(), err)
}
return claims, ErrNone
}

claims := xjwt.NewMapClaims()
return claims.Map(), ErrNone
}

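Both token comparisons above go through subtle.ConstantTimeCompare rather than ==; plain string comparison short-circuits on the first differing byte, which can leak how much of a secret matched. A minimal sketch of the idiom:

	// Constant-time equality for secrets (sketch): execution time
	// does not depend on where the two inputs first differ.
	func secretsEqual(a, b string) bool {
		return subtle.ConstantTimeCompare([]byte(a), []byte(b)) == 1
	}

The same reasoning applies in getClaimsFromTokenWithSecret, where the fallback to the admin secret is gated on a constant-time comparison of the two secrets.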
// Check request auth type verifies the incoming http request
@@ -299,7 +313,7 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeSigned, authTypePresigned:
region := globalServerRegion
region := globalSite.Region
switch action {
case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
region = ""
@@ -487,6 +501,26 @@ func setAuthHandler(h http.Handler) http.Handler {
// handler for validating incoming authorization headers.
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
aType := getRequestAuthType(r)
if aType == authTypeSigned || aType == authTypeSignedV2 || aType == authTypeStreamingSigned {
// Verify if date headers are set, if not reject the request
amzDate, errCode := parseAmzDateHeader(r)
if errCode != ErrNone {
// All our internal APIs are sensitive towards Date
// header, for all requests where Date header is not
// present we will reject such clients.
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(errCode), r.URL)
atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
return
}
// Verify if the request date header is shifted by less than globalMaxSkewTime parameter in the past
// or in the future, reject request otherwise.
curTime := UTCNow()
if curTime.Sub(amzDate) > globalMaxSkewTime || amzDate.Sub(curTime) > globalMaxSkewTime {
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrRequestTimeTooSkewed), r.URL)
atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
return
}
}
if isSupportedS3AuthType(aType) || aType == authTypeJWT || aType == authTypeSTS {
h.ServeHTTP(w, r)
return
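The two-sided comparison above is just an absolute-difference bound on the request time; an equivalent helper (a sketch):

	// withinSkew reports whether t lies within +/- maxSkew of now.
	func withinSkew(t, now time.Time, maxSkew time.Duration) bool {
		d := now.Sub(t)
		if d < 0 {
			d = -d
		}
		return d <= maxSkew
	}

Because the check now lives in the outer auth handler, every signed, V2-signed, or streaming-signed request is rejected for bad or skewed dates before any S3 handler runs.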
@@ -509,7 +543,7 @@ func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool,
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypePresigned, authTypeSigned:
region := globalServerRegion
region := globalSite.Region
if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
return cred, owner, s3Err
}
@@ -576,7 +610,7 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
case authTypeSignedV2, authTypePresignedV2:
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
region := globalServerRegion
region := globalSite.Region
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {

@@ -341,7 +341,8 @@ func mustNewSignedEmptyMD5Request(method string, urlStr string, contentLength in
}

func mustNewSignedBadMD5Request(method string, urlStr string, contentLength int64,
body io.ReadSeeker, t *testing.T) *http.Request {
body io.ReadSeeker, t *testing.T,
) *http.Request {
req := mustNewRequest(method, urlStr, contentLength, body, t)
req.Header.Set("Content-Md5", "YWFhYWFhYWFhYWFhYWFhCg==")
cred := globalActiveCred
@@ -362,11 +363,14 @@ func TestIsReqAuthenticated(t *testing.T) {
t.Fatalf("unable initialize config file, %s", err)
}

newAllSubsystems()
initAllSubsystems()

initAllSubsystems(context.Background(), objLayer)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

globalIAMSys.InitStore(objLayer, globalEtcdClient)
initConfigSubsystem(ctx, objLayer)

globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)

creds, err := auth.CreateCredentials("myuser", "mypassword")
if err != nil {
@@ -392,10 +396,9 @@ func TestIsReqAuthenticated(t *testing.T) {
{mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrNone},
}

ctx := context.Background()
// Validates all testcases.
for i, testCase := range testCases {
s3Error := isReqAuthenticated(ctx, testCase.req, globalServerRegion, serviceS3)
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region, serviceS3)
if s3Error != testCase.s3Error {
if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
@@ -433,15 +436,15 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
}
ctx := context.Background()
for i, testCase := range testCases {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, iampolicy.AllAdminActions, globalServerRegion); s3Error != testCase.ErrCode {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, iampolicy.AllAdminActions, globalSite.Region); s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
}
}
}

func TestValidateAdminSignature(t *testing.T) {

ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

objLayer, fsDir, err := prepareFS()
if err != nil {
@@ -453,11 +456,11 @@ func TestValidateAdminSignature(t *testing.T) {
t.Fatalf("unable initialize config file, %s", err)
}

newAllSubsystems()
initAllSubsystems()

initAllSubsystems(context.Background(), objLayer)
initConfigSubsystem(ctx, objLayer)

globalIAMSys.InitStore(objLayer, globalEtcdClient)
globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)

creds, err := auth.CreateCredentials("admin", "mypassword")
if err != nil {

@@ -115,7 +115,6 @@ func newHealRoutine() *healRoutine {
tasks: make(chan healTask),
workers: workers,
}

}

// healDiskFormat - heals format.json, return value indicates if a

@@ -18,12 +18,12 @@
package cmd

import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"sort"
"strings"
"sync"
@@ -263,7 +263,7 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)

if drivesToHeal := globalBackgroundHealState.healDriveCount(); drivesToHeal > 0 {
logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content...",
logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content - use 'mc admin heal alias/ --verbose' to check the status",
drivesToHeal, defaultMonitorNewDiskInterval))

// Heal any disk format and metadata early, if possible.
@@ -277,35 +277,26 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
}
}

if err := bgSeq.healDiskMeta(objAPI); err != nil {
if newObjectLayerFn() != nil {
// log only in situations, when object layer
// has fully initialized.
logger.LogIf(bgSeq.ctx, err)
}
}

go monitorLocalDisksAndHeal(ctx, z, bgSeq)
}

func getLocalDisksToHeal() (disksToHeal Endpoints) {
for _, ep := range globalEndpoints {
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
disk, _, err := connectEndpoint(endpoint)
if errors.Is(err, errUnformattedDisk) {
disksToHeal = append(disksToHeal, endpoint)
} else if err == nil && disk != nil && disk.Healing() != nil {
disksToHeal = append(disksToHeal, disk.Endpoint())
}
for _, disk := range globalLocalDrives {
_, err := disk.GetDiskID()
if errors.Is(err, errUnformattedDisk) {
disksToHeal = append(disksToHeal, disk.Endpoint())
continue
}
if disk.Healing() != nil {
disksToHeal = append(disksToHeal, disk.Endpoint())
}
}
if len(disksToHeal) == globalEndpoints.NEndpoints() {
// When all disks == all command line endpoints
// this is a fresh setup, no need to trigger healing.
return Endpoints{}
}
return disksToHeal

}

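getLocalDisksToHeal now inspects the already-connected globalLocalDrives instead of re-dialing every endpoint. The per-drive decision, annotated (a sketch):

	// Per-drive decision (sketch): an unformatted drive needs a fresh
	// format and full heal; a drive with a live healing tracker needs
	// its interrupted heal resumed.
	_, err := disk.GetDiskID()
	switch {
	case errors.Is(err, errUnformattedDisk):
		disksToHeal = append(disksToHeal, disk.Endpoint())
	case disk.Healing() != nil:
		disksToHeal = append(disksToHeal, disk.Endpoint())
	}

The final guard treats "every endpoint needs healing" as a fresh deployment and returns an empty list, since there is no surviving data to reconstruct from.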
// monitorLocalDisksAndHeal - ensures that detected new disks are healed
@@ -334,7 +325,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
// Ensure that reformatting disks is finished
bgSeq.queueHealTask(healSource{bucket: nopHeal}, madmin.HealItemMetadata)

logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal - 'mc admin heal alias/ --verbose' to check the status.",
len(healDisks)))

erasureSetInPoolDisksToHeal = make([]map[int][]StorageAPI, len(z.serverPools))
@@ -375,15 +366,13 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq

buckets, _ := z.ListBuckets(ctx)

buckets = append(buckets, BucketInfo{
Name: pathJoin(minioMetaBucket, minioConfigPrefix),
})

// Buckets data are dispersed in multiple zones/sets, make
// sure to heal all bucket metadata configuration.
buckets = append(buckets, []BucketInfo{
{Name: pathJoin(minioMetaBucket, bucketMetaPrefix)},
}...)
buckets = append(buckets, BucketInfo{
Name: pathJoin(minioMetaBucket, minioConfigPrefix),
}, BucketInfo{
Name: pathJoin(minioMetaBucket, bucketMetaPrefix),
})

// Heal latest buckets first.
sort.Slice(buckets, func(i, j int) bool {
@@ -407,12 +396,15 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
go func(setIndex int, disks []StorageAPI) {
defer wg.Done()
for _, disk := range disks {
logger.Info("Healing disk '%v' on %s pool", disk, humanize.Ordinal(i+1))
if serverDebugLog {
logger.Info("Healing disk '%v' on %s pool", disk, humanize.Ordinal(i+1))
}

// So someone changed the drives underneath, healing tracker missing.
tracker, err := loadHealingTracker(ctx, disk)
if err != nil {
logger.Info("Healing tracker missing on '%s', disk was swapped again on %s pool", disk, humanize.Ordinal(i+1))
logger.LogIf(ctx, fmt.Errorf("Healing tracker missing on '%s', disk was swapped again on %s pool: %w",
disk, humanize.Ordinal(i+1), err))
tracker = newHealingTracker(disk)
}

@@ -434,16 +426,19 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
return
}

err = z.serverPools[i].sets[setIndex].healErasureSet(ctx, buckets, tracker)
err = z.serverPools[i].sets[setIndex].healErasureSet(ctx, tracker.QueuedBuckets, tracker)
if err != nil {
logger.LogIf(ctx, err)
continue
}

logger.Info("Healing disk '%s' on %s pool complete", disk, humanize.Ordinal(i+1))
var buf bytes.Buffer
tracker.printTo(&buf)
logger.Info("Summary:\n%s", buf.String())
if serverDebugLog {
logger.Info("Healing disk '%s' on %s pool, %s set complete", disk,
humanize.Ordinal(i+1), humanize.Ordinal(setIndex+1))
logger.Info("Summary:\n")
tracker.printTo(os.Stdout)
logger.Info("\n")
}
logger.LogIf(ctx, tracker.delete(ctx))

// Only upon success pop the healed disk.

@@ -119,8 +119,10 @@ func newBitrotReader(disk StorageAPI, data []byte, bucket string, filePath strin
// Close all the readers.
func closeBitrotReaders(rs []io.ReaderAt) {
for _, r := range rs {
if br, ok := r.(io.Closer); ok {
br.Close()
if r != nil {
if br, ok := r.(io.Closer); ok {
br.Close()
}
}
}
}
@@ -128,8 +130,10 @@ func closeBitrotReaders(rs []io.ReaderAt) {
// Close all the writers.
func closeBitrotWriters(ws []io.Writer) {
for _, w := range ws {
if bw, ok := w.(io.Closer); ok {
bw.Close()
if w != nil {
if bw, ok := w.(io.Closer); ok {
bw.Close()
}
}
}
}
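On the nil guards above: the reader and writer slices are indexed per erasure shard, and entries for offline drives are expected to stay nil. A type assertion on a nil interface already fails safely (ok comes back false), so the explicit check is belt-and-braces that documents the expectation rather than changing behavior; that reading is an inference, not stated in the patch. The idiom in isolation:

	// Close every non-nil entry that actually implements io.Closer.
	for _, r := range rs {
		if r == nil {
			continue // offline shard, nothing to close
		}
		if c, ok := r.(io.Closer); ok {
			c.Close()
		}
	}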
@@ -212,7 +216,7 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
// bitrotSelfTest tries to catch any issue in the bitrot implementation
// early instead of silently corrupting data.
func bitrotSelfTest() {
var checksums = map[BitrotAlgorithm]string{
checksums := map[BitrotAlgorithm]string{
SHA256: "a7677ff19e0182e4d52e3a3db727804abc82a5818749336369552e54b838b004",
BLAKE2b512: "e519b7d84b1c3c917985f544773a35cf265dcab10948be3550320d156bab612124a5ae2ae5a8c73c0eea360f68b0e28136f26e858756dbfe7375a7389f26c669",
HighwayHash256: "39c0407ed3f01b18d22c85db4aeff11e060ca5f43131b0126731ca197cd42313",

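The pinned digests above act as known-answer tests: each bitrot algorithm hashes a fixed payload at startup and the output must match byte for byte. A sketch of the comparison loop, where the payload, the New() constructor on the algorithm type, and the failure handling are all assumptions since only the map appears in this hunk:

	// Known-answer self-test loop (sketch).
	for algo, want := range checksums {
		h := algo.New()           // assumed hash constructor
		h.Write(selfTestPayload)  // hypothetical fixed input
		if hex.EncodeToString(h.Sum(nil)) != want {
			panic(fmt.Sprintf("bitrot self-test failed for %v", algo))
		}
	}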
@@ -86,14 +86,29 @@ func (s1 ServerSystemConfig) Diff(s2 ServerSystemConfig) error {
}
}
if !reflect.DeepEqual(s1.MinioEnv, s2.MinioEnv) {
return fmt.Errorf("Expected same MINIO_ environment variables and values")
var missing []string
var mismatching []string
for k, v := range s1.MinioEnv {
ev, ok := s2.MinioEnv[k]
if !ok {
missing = append(missing, k)
} else if v != ev {
mismatching = append(mismatching, k)
}
}
if len(mismatching) > 0 {
return fmt.Errorf(`Expected same MINIO_ environment variables and values across all servers: Missing environment values: %s / Mismatch environment values: %s`, missing, mismatching)
}
return fmt.Errorf(`Expected same MINIO_ environment variables and values across all servers: Missing environment values: %s`, missing)
}
return nil
}

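Worth noting about the diff loop above: it walks only s1.MinioEnv, so a variable set exclusively on the remote peer is never reported. A symmetric pass (a sketch, not part of this patch) would close that gap:

	// Also flag variables the peer has that the local server lacks.
	for k := range s2.MinioEnv {
		if _, ok := s1.MinioEnv[k]; !ok {
			missing = append(missing, k+" (only on peer)")
		}
	}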
var skipEnvs = map[string]struct{}{
"MINIO_OPTS": {},
"MINIO_CERT_PASSWD": {},
"MINIO_OPTS": {},
"MINIO_CERT_PASSWD": {},
"MINIO_SERVER_DEBUG": {},
"MINIO_DSYNC_TRACE": {},
}

func getServerSystemCfg() ServerSystemConfig {
@@ -123,7 +138,6 @@ func (b *bootstrapRESTServer) VerifyHandler(w http.ResponseWriter, r *http.Reque
ctx := newContext(r, w, "VerifyHandler")
cfg := getServerSystemCfg()
logger.LogIf(ctx, json.NewEncoder(w).Encode(&cfg))
w.(http.Flusher).Flush()
}

// registerBootstrapRESTHandlers - register bootstrap rest router.
@@ -191,11 +205,11 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS
for onlineServers < len(clnts)/2 {
for _, clnt := range clnts {
if err := clnt.Verify(ctx, srcCfg); err != nil {
if isNetworkError(err) {
offlineEndpoints = append(offlineEndpoints, clnt.String())
continue
if !isNetworkError(err) {
logger.LogIf(ctx, fmt.Errorf("%s has incorrect configuration: %w", clnt.String(), err))
}
return fmt.Errorf("%s as has incorrect configuration: %w", clnt.String(), err)
offlineEndpoints = append(offlineEndpoints, clnt.String())
continue
}
onlineServers++
}
@@ -248,7 +262,7 @@ func newBootstrapRESTClient(endpoint Endpoint) *bootstrapRESTClient {
Path: bootstrapRESTPath,
}

restClient := rest.NewClient(serverURL, globalInternodeTransport, newAuthToken)
restClient := rest.NewClient(serverURL, globalInternodeTransport, newCachedAuthToken())
restClient.HealthCheckFn = nil

return &bootstrapRESTClient{endpoint: endpoint, restClient: restClient}

@@ -20,12 +20,15 @@ package cmd
import (
"encoding/base64"
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"

"github.com/gorilla/mux"
"github.com/minio/kes"
"github.com/minio/madmin-go"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/bucket/policy"
)
@@ -84,6 +87,19 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
kmsKey := encConfig.KeyID()
if kmsKey != "" {
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
_, err := GlobalKMS.GenerateKey(kmsKey, kmsContext)
if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) {
writeErrorResponse(ctx, w, toAPIError(ctx, errKMSKeyNotFound), r.URL)
return
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}

configData, err := xml.Marshal(encConfig)
if err != nil {
@@ -92,7 +108,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
}

// Store the bucket encryption configuration in the object layer
if err = globalBucketMetadataSys.Update(bucket, bucketSSEConfig, configData); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -142,7 +158,7 @@ func (api objectAPIHandlers) GetBucketEncryptionHandler(w http.ResponseWriter, r
return
}

config, err := globalBucketMetadataSys.GetSSEConfig(bucket)
config, _, err := globalBucketMetadataSys.GetSSEConfig(bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -186,10 +202,19 @@ func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter
}

// Delete bucket encryption config from object layer
if err = globalBucketMetadataSys.Update(bucket, bucketSSEConfig, nil); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Call site replication hook.
//
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket,
SSEConfig: nil,
}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

writeSuccessNoContent(w)
}

@@ -43,7 +43,8 @@ func (sys *BucketSSEConfigSys) Get(bucket string) (*sse.BucketSSEConfig, error)
return nil, BucketSSEConfigNotFound{Bucket: bucket}
}

return globalBucketMetadataSys.GetSSEConfig(bucket)
sseCfg, _, err := globalBucketMetadataSys.GetSSEConfig(bucket)
return sseCfg, err
}

// validateBucketSSEConfig parses bucket encryption configuration and validates if it is supported by MinIO.

@@ -133,8 +133,6 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// Add/update buckets that are not registered with the DNS
bucketsToBeUpdatedSlice := bucketsToBeUpdated.ToSlice()
g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50)
ctx, cancel := g.WithCancelOnError(GlobalContext)
defer cancel()

for index := range bucketsToBeUpdatedSlice {
index := index
@@ -143,9 +141,12 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
}, index)
}

if err := g.WaitErr(); err != nil {
logger.LogIf(ctx, err)
return
ctx := GlobalContext
for _, err := range g.Wait() {
if err != nil {
logger.LogIf(ctx, err)
return
}
}

for _, bucket := range bucketsInConflict.ToSlice() {
@@ -210,7 +211,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
// Generate response.
encodedSuccessResponse := encodeResponse(LocationResponse{})
// Get current region.
region := globalServerRegion
region := globalSite.Region
if region != globalMinioDefaultRegion {
encodedSuccessResponse = encodeResponse(LocationResponse{
Location: region,
@@ -414,18 +415,23 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
const maxBodySize = 2 * 100000 * 1024

// Unmarshal list of keys to be deleted.
deleteObjects := &DeleteObjectsRequest{}
if err := xmlDecoder(r.Body, deleteObjects, maxBodySize); err != nil {
deleteObjectsReq := &DeleteObjectsRequest{}
if err := xmlDecoder(r.Body, deleteObjectsReq, maxBodySize); err != nil {
logger.LogIf(ctx, err, logger.Application)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

objects := make([]ObjectV, len(deleteObjectsReq.Objects))
// Convert object name delete objects if it has `/` in the beginning.
for i := range deleteObjects.Objects {
deleteObjects.Objects[i].ObjectName = trimLeadingSlash(deleteObjects.Objects[i].ObjectName)
for i := range deleteObjectsReq.Objects {
deleteObjectsReq.Objects[i].ObjectName = trimLeadingSlash(deleteObjectsReq.Objects[i].ObjectName)
objects[i] = deleteObjectsReq.Objects[i].ObjectV
}

// Make sure to update context to print ObjectNames for multi objects.
ctx = updateReqContext(ctx, objects...)

// Call checkRequestAuthType to populate ReqInfo.AccessKey before GetBucketInfo()
// Ignore errors here to preserve the S3 error behavior of GetBucketInfo()
checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, "")
@@ -443,12 +449,12 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}

// Return Malformed XML as S3 spec if the number of objects is empty
if len(deleteObjects.Objects) == 0 || len(deleteObjects.Objects) > maxDeleteList {
if len(deleteObjectsReq.Objects) == 0 || len(deleteObjectsReq.Objects) > maxDeleteList {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL)
return
}

var objectsToDelete = map[ObjectToDelete]int{}
objectsToDelete := map[ObjectToDelete]int{}
getObjectInfoFn := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfoFn = api.CacheAPI().GetObjectInfo
@@ -460,7 +466,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
goi ObjectInfo
gerr error
)
replicateDeletes := hasReplicationRules(ctx, bucket, deleteObjects.Objects)
replicateDeletes := hasReplicationRules(ctx, bucket, deleteObjectsReq.Objects)
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
hasLockEnabled = true
}
@@ -468,16 +474,23 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
versioned := globalBucketVersioningSys.Enabled(bucket)
suspended := globalBucketVersioningSys.Suspended(bucket)

dErrs := make([]DeleteError, len(deleteObjects.Objects))
oss := make([]*objSweeper, len(deleteObjects.Objects))
for index, object := range deleteObjects.Objects {
type deleteResult struct {
delInfo DeletedObject
errInfo DeleteError
}

deleteResults := make([]deleteResult, len(deleteObjectsReq.Objects))

oss := make([]*objSweeper, len(deleteObjectsReq.Objects))

for index, object := range deleteObjectsReq.Objects {
if apiErrCode := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); apiErrCode != ErrNone {
if apiErrCode == ErrSignatureDoesNotMatch || apiErrCode == ErrInvalidAccessKeyID {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL)
return
}
apiErr := errorCodes.ToAPIErr(apiErrCode)
dErrs[index] = DeleteError{
deleteResults[index].errInfo = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
@@ -489,7 +502,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
if _, err := uuid.Parse(object.VersionID); err != nil {
logger.LogIf(ctx, fmt.Errorf("invalid version-id specified %w", err))
apiErr := errorCodes.ToAPIErr(ErrNoSuchVersion)
dErrs[index] = DeleteError{
deleteResults[index].errInfo = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
@@ -519,8 +532,10 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

if replicateDeletes {
dsc = checkReplicateDelete(ctx, bucket, ObjectToDelete{
ObjectName: object.ObjectName,
VersionID: object.VersionID,
ObjectV: ObjectV{
ObjectName: object.ObjectName,
VersionID: object.VersionID,
},
}, goi, opts, gerr)
if dsc.ReplicateAny() {
if object.VersionID != "" {
@@ -535,7 +550,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
if object.VersionID != "" && hasLockEnabled {
if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, goi, gerr); apiErrCode != ErrNone {
apiErr := errorCodes.ToAPIErr(apiErrCode)
dErrs[index] = DeleteError{
deleteResults[index].errInfo = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
@@ -561,20 +576,25 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
return
}

// Disable timeouts and cancellation
ctx = bgContext(ctx)

deleteList := toNames(objectsToDelete)
dObjects, errs := deleteObjectsFn(ctx, bucket, deleteList, ObjectOptions{
Versioned: versioned,
VersionSuspended: suspended,
})
deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))

for i := range errs {
// DeleteMarkerVersionID is not used specifically to avoid
// lookup errors, since DeleteMarkerVersionID is only
// created during DeleteMarker creation when client didn't
// specify a versionID.
objToDel := ObjectToDelete{
ObjectName: dObjects[i].ObjectName,
VersionID: dObjects[i].VersionID,
ObjectV: ObjectV{
ObjectName: dObjects[i].ObjectName,
VersionID: dObjects[i].VersionID,
},
VersionPurgeStatus: dObjects[i].VersionPurgeStatus(),
VersionPurgeStatuses: dObjects[i].ReplicationState.VersionPurgeStatusInternal,
DeleteMarkerReplicationStatus: dObjects[i].ReplicationState.ReplicationStatusInternal,
@@ -585,11 +605,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
if replicateDeletes {
dObjects[i].ReplicationState = deleteList[i].ReplicationState()
}
deletedObjects[dindex] = dObjects[i]
deleteResults[dindex].delInfo = dObjects[i]
continue
}
apiErr := toAPIError(ctx, errs[i])
dErrs[dindex] = DeleteError{
deleteResults[dindex].errInfo = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: deleteList[i].ObjectName,
@@ -597,15 +617,18 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}
}

var deleteErrors []DeleteError
for _, dErr := range dErrs {
if dErr.Code != "" {
deleteErrors = append(deleteErrors, dErr)
// Generate response
deleteErrors := make([]DeleteError, 0, len(deleteObjectsReq.Objects))
deletedObjects := make([]DeletedObject, 0, len(deleteObjectsReq.Objects))
for _, deleteResult := range deleteResults {
if deleteResult.errInfo.Code != "" {
deleteErrors = append(deleteErrors, deleteResult.errInfo)
} else {
deletedObjects = append(deletedObjects, deleteResult.delInfo)
}
}

// Generate response
response := generateMultiDeleteResponse(deleteObjects.Quiet, deletedObjects, deleteErrors)
response := generateMultiDeleteResponse(deleteObjectsReq.Quiet, deletedObjects, deleteErrors)
encodedSuccessResponse := encodeResponse(response)

// Write success response.
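The refactor above replaces the two parallel slices (dErrs and deletedObjects) with one deleteResults slice holding either a success or an error per requested object, split apart only once at response time. The shape, in isolation:

	// One slot per requested object, keyed by the original request
	// index; exactly one of the two fields ends up populated.
	type deleteResult struct {
		delInfo DeletedObject
		errInfo DeleteError
	}

This keeps request order stable without maintaining index bookkeeping across two separate slices.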
@@ -683,13 +706,27 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
bucket := vars["bucket"]

objectLockEnabled := false
if vs, found := r.Header[http.CanonicalHeaderKey("x-amz-bucket-object-lock-enabled")]; found {
v := strings.ToLower(strings.Join(vs, ""))
if v != "true" && v != "false" {
if vs := r.Header.Get(xhttp.AmzObjectLockEnabled); len(vs) > 0 {
v := strings.ToLower(vs)
switch v {
case "true", "false":
objectLockEnabled = v == "true"
default:
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
}

forceCreate := false
if vs := r.Header.Get(xhttp.MinIOForceCreate); len(vs) > 0 {
v := strings.ToLower(vs)
switch v {
case "true", "false":
forceCreate = v == "true"
default:
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
objectLockEnabled = v == "true"
}

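The two boolean-header blocks above share a shape; a hypothetical helper (not in this patch, and the sentinel error is an assumption) could express both:

	// parseBoolHeader returns (value, present) for a "true"/"false"
	// header, case-insensitively, or an error for any other value.
	func parseBoolHeader(r *http.Request, name string) (bool, bool, error) {
		vs := r.Header.Get(name)
		if vs == "" {
			return false, false, nil
		}
		switch strings.ToLower(vs) {
		case "true":
			return true, true, nil
		case "false":
			return false, true, nil
		default:
			return false, false, errInvalidArgument // hypothetical sentinel
		}
	}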
if s3Error := checkRequestAuthType(ctx, r, policy.CreateBucketAction, bucket, ""); s3Error != ErrNone {
@@ -714,6 +751,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
opts := BucketOptions{
Location: location,
LockEnabled: objectLockEnabled,
ForceCreate: forceCreate,
}

if globalDNSConfig != nil {
@@ -902,7 +940,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
// S3 feature to replace ${filename} found in Key form field
// by the filename attribute passed in multipart
formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1))
formValues.Set("Key", strings.ReplaceAll(formValues.Get("Key"), "${filename}", fileName))
}
object := trimLeadingSlash(formValues.Get("Key"))

@@ -928,6 +966,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// explicit permissions for the user.
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.PutObjectAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
BucketName: bucket,
@@ -1199,7 +1238,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
return
}

writeSuccessResponseHeadersOnly(w)
writeResponse(w, http.StatusOK, nil, mimeXML)
}

// DeleteBucketHandler - Delete bucket
@@ -1246,6 +1285,17 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
rcfg, err := getReplicationConfig(ctx, bucket)
switch {
case err != nil:
if _, ok := err.(BucketReplicationConfigNotFound); !ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
case rcfg.HasActiveRules("", true):
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
}
}

@@ -1269,7 +1319,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
}
if globalDNSConfig != nil {
if err2 := globalDNSConfig.Put(bucket); err2 != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to restore bucket DNS entry %w, pl1ease fix it manually", err2))
logger.LogIf(ctx, fmt.Errorf("Unable to restore bucket DNS entry %w, please fix it manually", err2))
}
}
writeErrorResponse(ctx, w, apiErr, r.URL)
@@ -1277,7 +1327,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
}

globalNotificationSys.DeleteBucketMetadata(ctx, bucket)

globalReplicationPool.deleteResyncMetadata(ctx, bucket)
// Call site replication hook.
if err := globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -1339,12 +1389,12 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
}

// Deny object locking configuration settings on existing buckets without object lock enabled.
if _, err = globalBucketMetadataSys.GetObjectLockConfig(bucket); err != nil {
if _, _, err = globalBucketMetadataSys.GetObjectLockConfig(bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if err = globalBucketMetadataSys.Update(bucket, objectLockConfig, configData); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1392,7 +1442,7 @@ func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWri
return
}

config, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)
config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -1449,7 +1499,7 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
return
}

if err = globalBucketMetadataSys.Update(bucket, bucketTaggingConfig, configData); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1494,7 +1544,7 @@ func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *h
return
}

config, err := globalBucketMetadataSys.GetTaggingConfig(bucket)
config, _, err := globalBucketMetadataSys.GetTaggingConfig(bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -1531,7 +1581,7 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r
return
}

if err := globalBucketMetadataSys.Update(bucket, bucketTaggingConfig, nil); err != nil {
if err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1575,7 +1625,10 @@ func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWr
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if globalSiteReplicationSys.isEnabled() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationDenyEditError), r.URL)
return
}
if versioned := globalBucketVersioningSys.Enabled(bucket); !versioned {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationNeedsVersioningError), r.URL)
return
@@ -1602,7 +1655,7 @@ func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWr
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketReplicationConfig, configData); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketReplicationConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1639,7 +1692,7 @@ func (api objectAPIHandlers) GetBucketReplicationConfigHandler(w http.ResponseWr
return
}

config, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -1677,7 +1730,11 @@ func (api objectAPIHandlers) DeleteBucketReplicationConfigHandler(w http.Respons
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err := globalBucketMetadataSys.Update(bucket, bucketReplicationConfig, nil); err != nil {
if globalSiteReplicationSys.isEnabled() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationDenyEditError), r.URL)
return
}
if err := globalBucketMetadataSys.Update(ctx, bucket, bucketReplicationConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1720,21 +1777,22 @@ func (api objectAPIHandlers) GetBucketReplicationMetricsHandler(w http.ResponseW
usageInfo = dataUsageInfo.BucketsUsage[bucket]
}

bucketReplStats := getLatestReplicationStats(bucket, usageInfo)
jsonData, err := json.Marshal(bucketReplStats)
if err != nil {
w.Header().Set(xhttp.ContentType, string(mimeJSON))

enc := json.NewEncoder(w)
if err = enc.Encode(getLatestReplicationStats(bucket, usageInfo)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, jsonData)
}

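One trade-off of encoding straight to the ResponseWriter above: once Encode has written its first bytes, the error branch can no longer change the HTTP status, so it effectively covers only failures that occur before the first write. The streaming pattern, in isolation (a sketch):

	// Stream JSON without first building the whole payload in memory.
	w.Header().Set(xhttp.ContentType, string(mimeJSON))
	if err := json.NewEncoder(w).Encode(stats); err != nil {
		// best-effort: headers may already be on the wire
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}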
// ResetBucketReplicationStateHandler - starts a replication reset for all objects in a bucket which
// ResetBucketReplicationStartHandler - starts a replication reset for all objects in a bucket which
// qualify for replication and re-sync the object(s) to target, provided ExistingObjectReplication is
// enabled for the qualifying rule. This API is a MinIO only extension provided for situations where
// remote target is entirely lost,and previously replicated objects need to be re-synced.
func (api objectAPIHandlers) ResetBucketReplicationStateHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ResetBucketReplicationState")
// remote target is entirely lost,and previously replicated objects need to be re-synced. If resync is
// already in progress it returns an error
func (api objectAPIHandlers) ResetBucketReplicationStartHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ResetBucketReplicationStart")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
@@ -1758,6 +1816,7 @@ func (api objectAPIHandlers) ResetBucketReplicationStateHandler(w http.ResponseW
}), r.URL)
}
}
resetBeforeDate := UTCNow().AddDate(0, 0, -1*int(days/24))

objectAPI := api.ObjectAPI()
if objectAPI == nil {
@@ -1776,7 +1835,7 @@ func (api objectAPIHandlers) ResetBucketReplicationStateHandler(w http.ResponseW
return
}

config, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -1788,12 +1847,13 @@ func (api objectAPIHandlers) ResetBucketReplicationStateHandler(w http.ResponseW
tgtArns := config.FilterTargetArns(
replication.ObjectOpts{
OpType: replication.ResyncReplicationType,
TargetArn: arn})
TargetArn: arn,
})

if len(tgtArns) == 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: fmt.Errorf("Remote target ARN %s missing/not eligible for replication resync", arn),
Err: fmt.Errorf("Remote target ARN %s missing or ineligible for replication resync", arn),
}), r.URL)
return
}
@@ -1817,22 +1877,90 @@ func (api objectAPIHandlers) ResetBucketReplicationStateHandler(w http.ResponseW
default:
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
}
}
if err := startReplicationResync(ctx, bucket, arn, resetID, resetBeforeDate, objectAPI); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: err,
}), r.URL)
return
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
tgtBytes, err := json.Marshal(&targets)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

data, err := json.Marshal(rinfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, data)
}

// ResetBucketReplicationStatusHandler - returns the status of replication reset.
// This API is a MinIO only extension
func (api objectAPIHandlers) ResetBucketReplicationStatusHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ResetBucketReplicationStatus")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
arn := r.URL.Query().Get("arn")
var err error

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.ResetBucketReplicationStateAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}

// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if _, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

globalReplicationPool.resyncState.RLock()
brs, ok := globalReplicationPool.resyncState.statusMap[bucket]
if !ok {
brs, err = loadBucketResyncMetadata(ctx, bucket, objectAPI)
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: fmt.Errorf("No replication resync status available for %s", arn),
}), r.URL)
}
return
}

var rinfo ResyncTargetsInfo
for tarn, st := range brs.TargetsMap {
if arn != "" && tarn != arn {
continue
}
rinfo.Targets = append(rinfo.Targets, ResyncTarget{
Arn: tarn,
ResetID: st.ResyncID,
StartTime: st.StartTime,
EndTime: st.EndTime,
ResyncStatus: st.ResyncStatus.String(),
ReplicatedSize: st.ReplicatedSize,
ReplicatedCount: st.ReplicatedCount,
FailedSize: st.FailedSize,
FailedCount: st.FailedCount,
Bucket: st.Bucket,
Object: st.Object,
})
}
globalReplicationPool.resyncState.RUnlock()
data, err := json.Marshal(rinfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

@@ -20,6 +20,7 @@ package cmd
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
@@ -35,7 +36,8 @@ func TestRemoveBucketHandler(t *testing.T) {
|
||||
}
|
||||
|
||||
func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
|
||||
credentials auth.Credentials, t *testing.T) {
|
||||
credentials auth.Credentials, t *testing.T,
|
||||
) {
|
||||
_, err := obj.PutObject(GlobalContext, bucketName, "test-object", mustGetPutObjReader(t, bytes.NewReader([]byte{}), int64(0), "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), ObjectOptions{})
|
||||
// if object upload fails stop the test.
|
||||
if err != nil {
|
||||
@@ -80,8 +82,8 @@ func TestGetBucketLocationHandler(t *testing.T) {
|
||||
}
|
||||
|
||||
func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
|
||||
credentials auth.Credentials, t *testing.T) {
|
||||
|
||||
credentials auth.Credentials, t *testing.T,
|
||||
) {
|
||||
// test cases with sample input and expected output.
|
||||
testCases := []struct {
|
||||
bucketName string
|
||||
@@ -162,7 +164,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
|
||||
recV2 := httptest.NewRecorder()
|
||||
// construct HTTP request for PUT bucket policy endpoint.
|
||||
reqV2, err := newTestSignedRequestV2(http.MethodGet, getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
|
||||
}
|
||||
@@ -209,7 +210,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
|
||||
|
||||
nilBucket := "dummy-bucket"
|
||||
nilReq, err := newTestRequest(http.MethodGet, getBucketLocationURL("", nilBucket), 0, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
|
||||
}
|
||||
@@ -224,8 +224,8 @@ func TestHeadBucketHandler(t *testing.T) {
}

func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
	// test cases with sample input and expected output.
	testCases := []struct {
		bucketName string

@@ -281,7 +281,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
		recV2 := httptest.NewRecorder()
		// construct HTTP request for PUT bucket policy endpoint.
		reqV2, err := newTestSignedRequestV2(http.MethodHead, getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
-
		if err != nil {
			t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
		}

@@ -296,7 +295,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api

	// Test for Anonymous/unsigned http request.
	anonReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", bucketName), 0, nil)
-
	if err != nil {
		t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
			instanceType, bucketName, err)

@@ -314,7 +312,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api

	nilBucket := "dummy-bucket"
	nilReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", nilBucket), 0, nil)
-
	if err != nil {
		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
	}
@@ -330,8 +327,8 @@ func TestListMultipartUploadsHandler(t *testing.T) {

// testListMultipartUploadsHandler - Tests validate listing of multipart uploads.
func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
	// Collection of non-exhaustive ListMultipartUploads test cases, valid errors
	// and success responses.
	testCases := []struct {

@@ -551,7 +548,6 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
		testCases[6].uploadIDMarker, testCases[6].delimiter, testCases[6].maxUploads)

	nilReq, err := newTestRequest(http.MethodGet, url, 0, nil)
-
	if err != nil {
		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
	}
@@ -567,8 +563,8 @@ func TestListBucketsHandler(t *testing.T) {

// testListBucketsHandler - Tests validate listing of buckets.
func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
	testCases := []struct {
		bucketName string
		accessKey  string

@@ -614,7 +610,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap

		// verify response for V2 signed HTTP request.
		reqV2, err := newTestSignedRequestV2(http.MethodGet, getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
-
		if err != nil {
			t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
		}

@@ -629,7 +624,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
	// Test for Anonymous/unsigned http request.
	// ListBucketsHandler doesn't support bucket policies, setting the policies shouldn't make a difference.
	anonReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)
-
	if err != nil {
		t.Fatalf("MinIO %s: Failed to create an anonymous request.", instanceType)
	}

@@ -645,7 +639,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
	// The only aim is to generate an HTTP request in a way that the relevant/registered end point is evoked/called.

	nilReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)
-
	if err != nil {
		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
	}
@@ -656,12 +649,12 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap

// Wrapper for calling DeleteMultipleObjects HTTP handler tests for both Erasure multiple disks and single node setup.
func TestAPIDeleteMultipleObjectsHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects"})
+	ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects", "PutBucketPolicy"})
}

func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
	var err error

	contentBytes := []byte("hello")
@@ -680,10 +673,35 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
		objectNames = append(objectNames, objectName)
	}

+	for _, name := range []string{"private/object", "public/object"} {
+		// Uploading the object with retention enabled
+		_, err = obj.PutObject(GlobalContext, bucketName, name, mustGetPutObjReader(t, bytes.NewReader(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
+		// if object upload fails stop the test.
+		if err != nil {
+			t.Fatalf("Put Object %s: Error uploading object: <ERROR> %v", name, err)
+		}
+	}
+
+	// The following block will create a bucket policy with delete object to 'public/*'. This is
+	// to test a mixed response of a successful & failure while deleting objects in a single request
+	policyBytes := []byte(fmt.Sprintf(`{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName))
+	rec := httptest.NewRecorder()
+	req, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", bucketName), int64(len(policyBytes)), bytes.NewReader(policyBytes),
+		credentials.AccessKey, credentials.SecretKey, nil)
+	if err != nil {
+		t.Fatalf("Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", err)
+	}
+	apiRouter.ServeHTTP(rec, req)
+	if rec.Code != http.StatusNoContent {
+		t.Errorf("Expected the response status to be `%d`, but instead found `%d`", 200, rec.Code)
+	}
+
	getObjectToDeleteList := func(objectNames []string) (objectList []ObjectToDelete) {
		for _, objectName := range objectNames {
			objectList = append(objectList, ObjectToDelete{
-				ObjectName: objectName,
+				ObjectV: ObjectV{
+					ObjectName: objectName,
+				},
			})
		}
@@ -702,9 +720,21 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
		return deleteErrorList
	}

+	objects := []ObjectToDelete{}
+	objects = append(objects, ObjectToDelete{
+		ObjectV: ObjectV{
+			ObjectName: "private/object",
+		},
+	})
+	objects = append(objects, ObjectToDelete{
+		ObjectV: ObjectV{
+			ObjectName: "public/object",
+		},
+	})
	requestList := []DeleteObjectsRequest{
		{Quiet: false, Objects: getObjectToDeleteList(objectNames[:5])},
		{Quiet: true, Objects: getObjectToDeleteList(objectNames[5:])},
+		{Quiet: false, Objects: objects},
	}

	// generate multi objects delete response.
@@ -741,6 +771,20 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
	anonResponse := generateMultiDeleteResponse(requestList[0].Quiet, nil, getDeleteErrorList(requestList[0].Objects))
	encodedAnonResponse := encodeResponse(anonResponse)

+	anonRequestWithPartialPublicAccess := encodeResponse(requestList[2])
+	anonResponseWithPartialPublicAccess := generateMultiDeleteResponse(requestList[2].Quiet,
+		[]DeletedObject{
+			{ObjectName: "public/object"},
+		},
+		[]DeleteError{
+			{
+				Code:    errorCodes[ErrAccessDenied].Code,
+				Message: errorCodes[ErrAccessDenied].Description,
+				Key:     "private/object",
+			},
+		})
+	encodedAnonResponseWithPartialPublicAccess := encodeResponse(anonResponseWithPartialPublicAccess)
+
	testCases := []struct {
		bucket  string
		objects []byte

@@ -800,6 +844,17 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
			expectedContent:    encodedAnonResponse,
			expectedRespStatus: http.StatusOK,
		},
+		// Test case - 6.
+		// Anonymous user has access to some public folder, issue removing with
+		// another private object as well
+		{
+			bucket:             bucketName,
+			objects:            anonRequestWithPartialPublicAccess,
+			accessKey:          "",
+			secretKey:          "",
+			expectedContent:    encodedAnonResponseWithPartialPublicAccess,
+			expectedRespStatus: http.StatusOK,
+		},
	}

	for i, testCase := range testCases {
@@ -91,7 +91,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
		return
	}

-	if err = globalBucketMetadataSys.Update(bucket, bucketLifecycleConfig, configData); err != nil {
+	if err = globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, configData); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

@@ -168,7 +168,7 @@ func (api objectAPIHandlers) DeleteBucketLifecycleHandler(w http.ResponseWriter,
		return
	}

-	if err := globalBucketMetadataSys.Update(bucket, bucketLifecycleConfig, nil); err != nil {
+	if err := globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, nil); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
@@ -34,7 +34,8 @@ func TestBucketLifecycleWrongCredentials(t *testing.T) {

// Test for authentication
func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
	// test cases with sample input and expected output.
	testCases := []struct {
		method string

@@ -150,8 +151,8 @@ func TestBucketLifecycle(t *testing.T) {
// Simple tests of bucket lifecycle: PUT, GET, DELETE.
// Tests are related and the order is important.
func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	creds auth.Credentials, t *testing.T) {
+	creds auth.Credentials, t *testing.T,
+) {
	// test cases with sample input and expected output.
	testCases := []struct {
		method string

@@ -266,8 +267,8 @@ func testBucketLifecycle(obj ObjectLayer, instanceType, bucketName string, apiRo
		lifecycleResponse []byte
		errorResponse     APIErrorResponse
		shouldPass        bool
-	}) {
+	},
+) {
	for i, testCase := range testCases {
		// initialize httptest Recorder, this records any mutations to response writer inside the handler.
		rec := httptest.NewRecorder()
@@ -81,40 +81,58 @@ type expiryTask struct {
}

type expiryState struct {
-	once     sync.Once
-	expiryCh chan expiryTask
+	once                sync.Once
+	byDaysCh            chan expiryTask
+	byNewerNoncurrentCh chan newerNoncurrentTask
}

// PendingTasks returns the number of pending ILM expiry tasks.
func (es *expiryState) PendingTasks() int {
-	return len(es.expiryCh)
+	return len(es.byDaysCh) + len(es.byNewerNoncurrentCh)
}

-func (es *expiryState) queueExpiryTask(oi ObjectInfo, restoredObject bool, rmVersion bool) {
+// close closes work channels exactly once.
+func (es *expiryState) close() {
+	es.once.Do(func() {
+		close(es.byDaysCh)
+		close(es.byNewerNoncurrentCh)
+	})
+}
+
+// enqueueByDays enqueues object versions expired by days for expiry.
+func (es *expiryState) enqueueByDays(oi ObjectInfo, restoredObject bool, rmVersion bool) {
	select {
	case <-GlobalContext.Done():
-		es.once.Do(func() {
-			close(es.expiryCh)
-		})
-	case es.expiryCh <- expiryTask{objInfo: oi, versionExpiry: rmVersion, restoredObject: restoredObject}:
+		es.close()
+	case es.byDaysCh <- expiryTask{objInfo: oi, versionExpiry: rmVersion, restoredObject: restoredObject}:
	default:
	}
}

-var (
-	globalExpiryState *expiryState
-)
+// enqueueByNewerNoncurrent enqueues object versions expired by
+// NewerNoncurrentVersions limit for expiry.
+func (es *expiryState) enqueueByNewerNoncurrent(bucket string, versions []ObjectToDelete) {
+	select {
+	case <-GlobalContext.Done():
+		es.close()
+	case es.byNewerNoncurrentCh <- newerNoncurrentTask{bucket: bucket, versions: versions}:
+	default:
+	}
+}
+
+var globalExpiryState *expiryState

func newExpiryState() *expiryState {
	return &expiryState{
-		expiryCh: make(chan expiryTask, 10000),
+		byDaysCh:            make(chan expiryTask, 10000),
+		byNewerNoncurrentCh: make(chan newerNoncurrentTask, 10000),
	}
}

func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) {
	globalExpiryState = newExpiryState()
	go func() {
-		for t := range globalExpiryState.expiryCh {
+		for t := range globalExpiryState.byDaysCh {
			if t.objInfo.TransitionedObject.Status != "" {
				applyExpiryOnTransitionedObject(ctx, objectAPI, t.objInfo, t.restoredObject)
			} else {

@@ -122,6 +140,18 @@ func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) {
			}
		}
	}()
+	go func() {
+		for t := range globalExpiryState.byNewerNoncurrentCh {
+			deleteObjectVersions(ctx, objectAPI, t.bucket, t.versions)
+		}
+	}()
}

+// newerNoncurrentTask encapsulates arguments required by worker to expire objects
+// by NewerNoncurrentVersions
+type newerNoncurrentTask struct {
+	bucket   string
+	versions []ObjectToDelete
+}
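Editor's note on the hunk above: expiry work is now split across two queues, and both channels are closed exactly once via sync.Once regardless of which enqueue path first observes the cancelled GlobalContext. A minimal, self-contained sketch of that pattern, with illustrative names (not MinIO APIs):

	package main

	import (
		"context"
		"fmt"
		"sync"
	)

	type workState struct {
		once sync.Once
		a    chan int
		b    chan string
	}

	// close is safe to call from any number of racing goroutines.
	func (s *workState) close() {
		s.once.Do(func() {
			close(s.a)
			close(s.b)
		})
	}

	// enqueueA never blocks: a full queue drops the item instead of stalling
	// the caller, mirroring the default case in enqueueByDays above.
	func (s *workState) enqueueA(ctx context.Context, v int) {
		select {
		case <-ctx.Done():
			s.close()
		case s.a <- v:
		default:
		}
	}

	func main() {
		s := &workState{a: make(chan int, 8), b: make(chan string, 8)}
		ctx, cancel := context.WithCancel(context.Background())
		s.enqueueA(ctx, 42)
		fmt.Println("queued:", len(s.a))
		cancel()
		s.close()
		s.close() // idempotent: sync.Once guarantees a single close
	}

The non-blocking default case is also why PendingTasks above can report load as plain channel depth: producers shed work under pressure rather than queueing unboundedly.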
type transitionState struct {

@@ -135,6 +165,9 @@ type transitionState struct {
	killCh chan struct{}

	activeTasks int32
+
+	lastDayMu    sync.RWMutex
+	lastDayStats map[string]*lastDayTierStats
}

func (t *transitionState) queueTransitionTask(oi ObjectInfo) {

@@ -148,9 +181,7 @@ func (t *transitionState) queueTransitionTask(oi ObjectInfo) {
	}
}

-var (
-	globalTransitionState *transitionState
-)
+var globalTransitionState *transitionState

func newTransitionState(ctx context.Context, objAPI ObjectLayer) *transitionState {
	return &transitionState{

@@ -158,6 +189,7 @@ func newTransitionState(ctx context.Context, objAPI ObjectLayer) *transitionStat
		ctx:    ctx,
		objAPI: objAPI,
		killCh: make(chan struct{}),
+		lastDayStats: make(map[string]*lastDayTierStats),
	}
}
@@ -185,14 +217,47 @@ func (t *transitionState) worker(ctx context.Context, objectAPI ObjectLayer) {
				return
			}
			atomic.AddInt32(&t.activeTasks, 1)
-			if err := transitionObject(ctx, objectAPI, oi); err != nil {
+			var tier string
+			var err error
+			if tier, err = transitionObject(ctx, objectAPI, oi); err != nil {
				logger.LogIf(ctx, fmt.Errorf("Transition failed for %s/%s version:%s with %w", oi.Bucket, oi.Name, oi.VersionID, err))
-			}
+			} else {
+				ts := tierStats{
+					TotalSize:   uint64(oi.Size),
+					NumVersions: 1,
+				}
+				if oi.IsLatest {
+					ts.NumObjects = 1
+				}
+				t.addLastDayStats(tier, ts)
+			}
			atomic.AddInt32(&t.activeTasks, -1)
-
		}
	}
}

+func (t *transitionState) addLastDayStats(tier string, ts tierStats) {
+	t.lastDayMu.Lock()
+	defer t.lastDayMu.Unlock()
+
+	if _, ok := t.lastDayStats[tier]; !ok {
+		t.lastDayStats[tier] = &lastDayTierStats{}
+	}
+	t.lastDayStats[tier].addStats(ts)
+}
+
+func (t *transitionState) getDailyAllTierStats() DailyAllTierStats {
+	t.lastDayMu.RLock()
+	defer t.lastDayMu.RUnlock()
+
+	res := make(DailyAllTierStats, len(t.lastDayStats))
+	for tier, st := range t.lastDayStats {
+		res[tier] = st.clone()
+	}
+	return res
+}
+
// UpdateWorkers at the end of this function leaves n goroutines waiting for
// transition tasks
func (t *transitionState) UpdateWorkers(n int) {
@@ -337,15 +402,16 @@ func genTransitionObjName(bucket string) (string, error) {
// storage specified by the transition ARN, the metadata is left behind on source cluster and original content
// is moved to the transition tier. Note that in the case of encrypted objects, entire encrypted stream is moved
// to the transition tier without decrypting or re-encrypting.
-func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo) error {
+func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo) (string, error) {
	lc, err := globalLifecycleSys.Get(oi.Bucket)
	if err != nil {
-		return err
+		return "", err
	}
+	tier := lc.TransitionTier(oi.ToLifecycleOpts())
	opts := ObjectOptions{
		Transition: TransitionOptions{
			Status: lifecycle.TransitionPending,
-			Tier:   lc.TransitionTier(oi.ToLifecycleOpts()),
+			Tier:   tier,
			ETag:   oi.ETag,
		},
		VersionID: oi.VersionID,

@@ -353,7 +419,7 @@ func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo)
		VersionSuspended: globalBucketVersioningSys.Suspended(oi.Bucket),
		MTime:            oi.ModTime,
	}
-	return objectAPI.TransitionObject(ctx, oi.Bucket, oi.Name, opts)
+	return tier, objectAPI.TransitionObject(ctx, oi.Bucket, oi.Name, opts)
}

// getTransitionedObjectReader returns a reader from the transitioned tier.
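Editor's note: transitionObject now reports which tier it selected, so the worker shown earlier can attribute per-tier statistics without evaluating the lifecycle configuration a second time. An illustrative caller fragment (reusing identifiers from the surrounding diff, not a standalone program):

	tier, err := transitionObject(ctx, objectAPI, oi)
	if err == nil {
		// success: credit this object's size to the chosen tier's daily stats
		t.addLastDayStats(tier, tierStats{TotalSize: uint64(oi.Size), NumVersions: 1})
	}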
@@ -436,9 +502,7 @@ func (sp *SelectParameters) IsEmpty() bool {
	return sp == nil
}

-var (
-	selectParamsXMLName = "SelectParameters"
-)
+var selectParamsXMLName = "SelectParameters"

// UnmarshalXML - decodes XML data.
func (sp *SelectParameters) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
@@ -550,7 +614,7 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O

	if rreq.Type == SelectRestoreRequest {
		for _, v := range rreq.OutputLocation.S3.UserMetadata {
-			if !strings.HasPrefix("x-amz-meta", strings.ToLower(v.Name)) {
+			if !strings.HasPrefix(strings.ToLower(v.Name), "x-amz-meta") {
				meta["x-amz-meta-"+v.Name] = v.Value
				continue
			}
@@ -630,9 +694,9 @@ func completedRestoreObj(expiry time.Time) restoreObjStatus {
// String returns x-amz-restore compatible representation of r.
func (r restoreObjStatus) String() string {
	if r.Ongoing() {
-		return "ongoing-request=true"
+		return `ongoing-request="true"`
	}
-	return fmt.Sprintf("ongoing-request=false, expiry-date=%s", r.expiry.Format(http.TimeFormat))
+	return fmt.Sprintf(`ongoing-request="false", expiry-date="%s"`, r.expiry.Format(http.TimeFormat))
}

// Expiry returns expiry of restored object and true if restore-object has completed.

@@ -676,12 +740,11 @@ func parseRestoreObjStatus(restoreHdr string) (restoreObjStatus, error) {
	}

	switch progressTokens[1] {
-	case "true":
+	case "true", `"true"`: // true without double quotes is deprecated in Feb 2022
		if len(tokens) == 1 {
			return ongoingRestoreObj(), nil
		}
-
-	case "false":
+	case "false", `"false"`: // false without double quotes is deprecated in Feb 2022
		if len(tokens) != 2 {
			return restoreObjStatus{}, errRestoreHDRMalformed
		}

@@ -692,8 +755,7 @@ func parseRestoreObjStatus(restoreHdr string) (restoreObjStatus, error) {
		if strings.TrimSpace(expiryTokens[0]) != "expiry-date" {
			return restoreObjStatus{}, errRestoreHDRMalformed
		}
-
-		expiry, err := time.Parse(http.TimeFormat, expiryTokens[1])
+		expiry, err := time.Parse(http.TimeFormat, strings.Trim(expiryTokens[1], `"`))
		if err != nil {
			return restoreObjStatus{}, errRestoreHDRMalformed
		}
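Editor's note: AWS S3 returns x-amz-restore with quoted values, which is what the writer now emits; the parser keeps accepting the legacy unquoted form by trimming optional quotes before comparison. A standalone sketch of parsing the quoted form (illustrative only, not the MinIO implementation):

	package main

	import (
		"fmt"
		"net/http"
		"strings"
		"time"
	)

	func main() {
		hdr := `ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"`
		// SplitN with a limit of 2 matters: the expiry date itself contains a
		// comma, so only the first comma may separate the two tokens.
		parts := strings.SplitN(hdr, ",", 2)
		ongoing := strings.Trim(strings.SplitN(parts[0], "=", 2)[1], `"`)
		expiryRaw := strings.SplitN(strings.TrimSpace(parts[1]), "=", 2)[1]
		// Trim quotes so both the quoted and the deprecated unquoted form parse.
		expiry, err := time.Parse(http.TimeFormat, strings.Trim(expiryRaw, `"`))
		fmt.Println(ongoing, expiry.UTC(), err)
	}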
@@ -36,7 +36,7 @@ func TestParseRestoreObjStatus(t *testing.T) {
	}{
		{
			// valid: represents a restored object, 'pending' expiry.
-			restoreHdr: "ongoing-request=false, expiry-date=Fri, 21 Dec 2012 00:00:00 GMT",
+			restoreHdr: `ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"`,
			expectedStatus: restoreObjStatus{
				ongoing: false,
				expiry:  time.Date(2012, 12, 21, 0, 0, 0, 0, time.UTC),

@@ -45,7 +45,7 @@ func TestParseRestoreObjStatus(t *testing.T) {
		},
		{
			// valid: represents an ongoing restore object request.
-			restoreHdr: "ongoing-request=true",
+			restoreHdr: `ongoing-request="true"`,
			expectedStatus: restoreObjStatus{
				ongoing: true,
			},

@@ -53,13 +53,13 @@ func TestParseRestoreObjStatus(t *testing.T) {
		},
		{
			// invalid; ongoing restore object request can't have expiry set on it.
-			restoreHdr:     "ongoing-request=true, expiry-date=Fri, 21 Dec 2012 00:00:00 GMT",
+			restoreHdr:     `ongoing-request="true", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"`,
			expectedStatus: restoreObjStatus{},
			expectedErr:    errRestoreHDRMalformed,
		},
		{
			// invalid; completed restore object request must have expiry set on it.
-			restoreHdr:     "ongoing-request=false",
+			restoreHdr:     `ongoing-request="false"`,
			expectedStatus: restoreObjStatus{},
			expectedErr:    errRestoreHDRMalformed,
		},
@@ -19,6 +19,7 @@ package cmd

import (
	"context"
+	"fmt"
	"net/http"
	"strconv"
	"strings"

@@ -26,28 +27,9 @@ import (
	"github.com/gorilla/mux"
	"github.com/minio/minio/internal/logger"
-
-	"github.com/minio/minio/internal/sync/errgroup"
	"github.com/minio/pkg/bucket/policy"
)

-func concurrentDecryptETag(ctx context.Context, objects []ObjectInfo) {
-	g := errgroup.WithNErrs(len(objects)).WithConcurrency(500)
-	_, cancel := g.WithCancelOnError(ctx)
-	defer cancel()
-	for index := range objects {
-		index := index
-		g.Go(func() error {
-			size, err := objects[index].GetActualSize()
-			if err == nil {
-				objects[index].Size = size
-			}
-			objects[index].ETag = objects[index].GetActualETag(nil)
-			return nil
-		}, index)
-	}
-	g.WaitErr()
-}
-
// Validate all the ListObjects query arguments, returns an APIErrorCode
// if one of the args do not meet the required conditions.
// Special conditions required by MinIO server are as below

@@ -61,8 +43,8 @@ func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int
	}

	if encodingType != "" {
-		// Only url encoding type is supported
-		if strings.ToLower(encodingType) != "url" {
+		// AWS S3 spec only supports 'url' encoding type
+		if !strings.EqualFold(encodingType, "url") {
			return ErrInvalidEncodingMethod
		}
	}
@@ -118,7 +100,11 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
		return
	}

-	concurrentDecryptETag(ctx, listObjectVersionsInfo.Objects)
+	if err = DecryptETags(ctx, GlobalKMS, listObjectVersionsInfo.Objects); err != nil {
+		logger.LogIf(ctx, fmt.Errorf("Failed to decrypt ETag: %v", err)) // TODO(aead): Remove once we are confident that decryption does not fail accidentially
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
+		return
+	}

	response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo)

@@ -180,7 +166,11 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
		return
	}

-	concurrentDecryptETag(ctx, listObjectsV2Info.Objects)
+	if err = DecryptETags(ctx, GlobalKMS, listObjectsV2Info.Objects); err != nil {
+		logger.LogIf(ctx, fmt.Errorf("Failed to decrypt ETag: %v", err)) // TODO(aead): Remove once we are confident that decryption does not fail accidentially
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
+		return
+	}

	// The next continuation token has id@node_index format to optimize paginated listing
	nextContinuationToken := listObjectsV2Info.NextContinuationToken

@@ -255,7 +245,11 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
		return
	}

-	concurrentDecryptETag(ctx, listObjectsV2Info.Objects)
+	if err = DecryptETags(ctx, GlobalKMS, listObjectsV2Info.Objects); err != nil {
+		logger.LogIf(ctx, fmt.Errorf("Failed to decrypt ETag: %v", err)) // TODO(aead): Remove once we are confident that decryption does not fail accidentially
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
+		return
+	}

	response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
		delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,

@@ -352,7 +346,11 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
		return
	}

-	concurrentDecryptETag(ctx, listObjectsInfo.Objects)
+	if err = DecryptETags(ctx, GlobalKMS, listObjectsInfo.Objects); err != nil {
+		logger.LogIf(ctx, fmt.Errorf("Failed to decrypt ETag: %v", err)) // TODO(aead): Remove once we are confident that decryption does not fail accidentially
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
+		return
+	}

	response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)
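Editor's note: the removed concurrentDecryptETag was best-effort; decryption failures were silently swallowed and listings proceeded with undecrypted ETags. The DecryptETags calls introduced above fail the request instead. For contrast, a minimal, self-contained version of the old best-effort fan-out, written against golang.org/x/sync/errgroup rather than MinIO's internal errgroup wrapper (that substitution is an assumption for illustration):

	package main

	import (
		"fmt"

		"golang.org/x/sync/errgroup"
	)

	func main() {
		items := []string{"a", "b", "c"}
		var g errgroup.Group
		g.SetLimit(4) // bounded concurrency, analogous to WithConcurrency(500)
		for i := range items {
			i := i
			g.Go(func() error {
				items[i] += "-processed" // mutate in place; never report failure
				return nil
			})
		}
		_ = g.Wait() // errors were effectively ignored in the old pattern
		fmt.Println(items)
	}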
@@ -23,6 +23,7 @@ import (
	"errors"
	"fmt"
	"sync"
+	"time"

	"github.com/minio/madmin-go"
	"github.com/minio/minio-go/v7/pkg/tags"

@@ -74,7 +75,7 @@ func (sys *BucketMetadataSys) Set(bucket string, meta BucketMetadata) {

// Update update bucket metadata for the specified config file.
// The configData data should not be modified after being sent here.
-func (sys *BucketMetadataSys) Update(bucket string, configFile string, configData []byte) error {
+func (sys *BucketMetadataSys) Update(ctx context.Context, bucket string, configFile string, configData []byte) error {
	objAPI := newObjectLayerFn()
	if objAPI == nil {
		return errServerNotInitialized

@@ -83,13 +84,13 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
	if globalIsGateway && globalGatewayName != NASBackendGateway {
		if configFile == bucketPolicyConfig {
			if configData == nil {
-				return objAPI.DeleteBucketPolicy(GlobalContext, bucket)
+				return objAPI.DeleteBucketPolicy(ctx, bucket)
			}
			config, err := policy.ParseConfig(bytes.NewReader(configData), bucket)
			if err != nil {
				return err
			}
-			return objAPI.SetBucketPolicy(GlobalContext, bucket, config)
+			return objAPI.SetBucketPolicy(ctx, bucket, config)
		}
		return NotImplemented{}
	}

@@ -98,7 +99,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
		return errInvalidArgument
	}

-	meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
+	meta, err := loadBucketMetadata(ctx, objAPI, bucket)
	if err != nil {
		if !globalIsErasure && !globalIsDistErasure && errors.Is(err, errVolumeNotFound) {
			// Only single drive mode needs this fallback.

@@ -111,21 +112,26 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
	switch configFile {
	case bucketPolicyConfig:
		meta.PolicyConfigJSON = configData
+		meta.PolicyConfigUpdatedAt = UTCNow()
	case bucketNotificationConfig:
		meta.NotificationConfigXML = configData
	case bucketLifecycleConfig:
		meta.LifecycleConfigXML = configData
	case bucketSSEConfig:
		meta.EncryptionConfigXML = configData
+		meta.EncryptionConfigUpdatedAt = UTCNow()
	case bucketTaggingConfig:
		meta.TaggingConfigXML = configData
+		meta.TaggingConfigUpdatedAt = UTCNow()
	case bucketQuotaConfigFile:
		meta.QuotaConfigJSON = configData
+		meta.QuotaConfigUpdatedAt = UTCNow()
	case objectLockConfig:
		if !globalIsErasure && !globalIsDistErasure {
			return NotImplemented{}
		}
		meta.ObjectLockConfigXML = configData
+		meta.ObjectLockConfigUpdatedAt = UTCNow()
	case bucketVersioningConfig:
		if !globalIsErasure && !globalIsDistErasure {
			return NotImplemented{}

@@ -136,6 +142,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
			return NotImplemented{}
		}
		meta.ReplicationConfigXML = configData
+		meta.ReplicationConfigUpdatedAt = UTCNow()
	case bucketTargetsFile:
		meta.BucketTargetsConfigJSON, meta.BucketTargetsConfigMetaJSON, err = encryptBucketMetadata(meta.Name, configData, kms.Context{
			bucket: meta.Name,

@@ -148,12 +155,12 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
		return fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)
	}

-	if err := meta.Save(GlobalContext, objAPI); err != nil {
+	if err := meta.Save(ctx, objAPI); err != nil {
		return err
	}

	sys.Set(bucket, meta)
-	globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)
+	globalNotificationSys.LoadBucketMetadata(bgContext(ctx), bucket) // Do not use caller context here

	return nil
}
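Editor's note: the LoadBucketMetadata call above deliberately detaches from the caller's context ("Do not use caller context here") so that notifying peer nodes is not cancelled when the originating HTTP request ends. bgContext is presumably a helper that keeps request-scoped values while dropping cancellation; a minimal, self-contained sketch of that idea (Go 1.21's context.WithoutCancel now provides the same semantics):

	package main

	import (
		"context"
		"fmt"
		"time"
	)

	// detached keeps the parent's values but reports "never cancelled, no
	// deadline", so background work outlives the request that spawned it.
	type detached struct{ parent context.Context }

	func (d detached) Deadline() (time.Time, bool) { return time.Time{}, false }
	func (d detached) Done() <-chan struct{}       { return nil }
	func (d detached) Err() error                  { return nil }
	func (d detached) Value(key any) any           { return d.parent.Value(key) }

	var _ context.Context = detached{} // compile-time interface check

	func main() {
		type ctxKey string
		parent, cancel := context.WithCancel(context.WithValue(context.Background(), ctxKey("req"), "123"))
		cancel() // the request has finished
		bg := detached{parent}
		fmt.Println(bg.Err(), bg.Value(ctxKey("req"))) // <nil> 123
	}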
@@ -187,7 +194,7 @@ func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) {
// GetVersioningConfig returns configured versioning config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Versioning, error) {
-	meta, err := sys.GetConfig(bucket)
+	meta, err := sys.GetConfig(GlobalContext, bucket)
	if err != nil {
		return nil, err
	}

@@ -196,34 +203,34 @@ func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Ve

// GetTaggingConfig returns configured tagging config
// The returned object may not be modified.
-func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, error) {
-	meta, err := sys.GetConfig(bucket)
+func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, time.Time, error) {
+	meta, err := sys.GetConfig(GlobalContext, bucket)
	if err != nil {
		if errors.Is(err, errConfigNotFound) {
-			return nil, BucketTaggingNotFound{Bucket: bucket}
+			return nil, time.Time{}, BucketTaggingNotFound{Bucket: bucket}
		}
-		return nil, err
+		return nil, time.Time{}, err
	}
	if meta.taggingConfig == nil {
-		return nil, BucketTaggingNotFound{Bucket: bucket}
+		return nil, time.Time{}, BucketTaggingNotFound{Bucket: bucket}
	}
-	return meta.taggingConfig, nil
+	return meta.taggingConfig, meta.TaggingConfigUpdatedAt, nil
}

// GetObjectLockConfig returns configured object lock config
// The returned object may not be modified.
-func (sys *BucketMetadataSys) GetObjectLockConfig(bucket string) (*objectlock.Config, error) {
-	meta, err := sys.GetConfig(bucket)
+func (sys *BucketMetadataSys) GetObjectLockConfig(bucket string) (*objectlock.Config, time.Time, error) {
+	meta, err := sys.GetConfig(GlobalContext, bucket)
	if err != nil {
		if errors.Is(err, errConfigNotFound) {
-			return nil, BucketObjectLockConfigNotFound{Bucket: bucket}
+			return nil, time.Time{}, BucketObjectLockConfigNotFound{Bucket: bucket}
		}
-		return nil, err
+		return nil, time.Time{}, err
	}
	if meta.objectLockConfig == nil {
-		return nil, BucketObjectLockConfigNotFound{Bucket: bucket}
+		return nil, time.Time{}, BucketObjectLockConfigNotFound{Bucket: bucket}
	}
-	return meta.objectLockConfig, nil
+	return meta.objectLockConfig, meta.ObjectLockConfigUpdatedAt, nil
}

// GetLifecycleConfig returns configured lifecycle config

@@ -245,7 +252,7 @@ func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Life
		return meta.lifecycleConfig, nil
	}

-	meta, err := sys.GetConfig(bucket)
+	meta, err := sys.GetConfig(GlobalContext, bucket)
	if err != nil {
		if errors.Is(err, errConfigNotFound) {
			return nil, BucketLifecycleNotFound{Bucket: bucket}

@@ -274,7 +281,7 @@ func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Confi
		return meta.notificationConfig, nil
	}

-	meta, err := sys.GetConfig(bucket)
+	meta, err := sys.GetConfig(GlobalContext, bucket)
	if err != nil {
		return nil, err
	}

@@ -283,18 +290,27 @@ func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Confi

// GetSSEConfig returns configured SSE config
// The returned object may not be modified.
-func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEConfig, error) {
-	meta, err := sys.GetConfig(bucket)
+func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEConfig, time.Time, error) {
+	meta, err := sys.GetConfig(GlobalContext, bucket)
	if err != nil {
		if errors.Is(err, errConfigNotFound) {
-			return nil, BucketSSEConfigNotFound{Bucket: bucket}
+			return nil, time.Time{}, BucketSSEConfigNotFound{Bucket: bucket}
		}
-		return nil, err
+		return nil, time.Time{}, err
	}
	if meta.sseConfig == nil {
-		return nil, BucketSSEConfigNotFound{Bucket: bucket}
+		return nil, time.Time{}, BucketSSEConfigNotFound{Bucket: bucket}
	}
-	return meta.sseConfig, nil
+	return meta.sseConfig, meta.EncryptionConfigUpdatedAt, nil
}

+// CreatedAt returns the time of creation of bucket
+func (sys *BucketMetadataSys) CreatedAt(bucket string) (time.Time, error) {
+	meta, err := sys.GetConfig(GlobalContext, bucket)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return meta.Created.UTC(), nil
+}
+
// GetPolicyConfig returns configured bucket policy
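Editor's note: each getter that gains a time.Time return value forces its call sites to accept, or explicitly discard, the config's last-update timestamp. The bucket-object-lock.go hunk later in this comparison shows the typical one-line migration for callers that do not need it:

	config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucketName)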
@@ -308,7 +324,7 @@ func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, er
		return objAPI.GetBucketPolicy(GlobalContext, bucket)
	}

-	meta, err := sys.GetConfig(bucket)
+	meta, err := sys.GetConfig(GlobalContext, bucket)
	if err != nil {
		if errors.Is(err, errConfigNotFound) {
			return nil, BucketPolicyNotFound{Bucket: bucket}

@@ -323,35 +339,35 @@ func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, er

// GetQuotaConfig returns configured bucket quota
// The returned object may not be modified.
-func (sys *BucketMetadataSys) GetQuotaConfig(bucket string) (*madmin.BucketQuota, error) {
-	meta, err := sys.GetConfig(bucket)
+func (sys *BucketMetadataSys) GetQuotaConfig(ctx context.Context, bucket string) (*madmin.BucketQuota, time.Time, error) {
+	meta, err := sys.GetConfig(ctx, bucket)
	if err != nil {
-		return nil, err
+		return nil, time.Time{}, err
	}
-	return meta.quotaConfig, nil
+	return meta.quotaConfig, meta.QuotaConfigUpdatedAt, nil
}

// GetReplicationConfig returns configured bucket replication config
// The returned object may not be modified.
-func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket string) (*replication.Config, error) {
-	meta, err := sys.GetConfig(bucket)
+func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket string) (*replication.Config, time.Time, error) {
+	meta, err := sys.GetConfig(ctx, bucket)
	if err != nil {
		if errors.Is(err, errConfigNotFound) {
-			return nil, BucketReplicationConfigNotFound{Bucket: bucket}
+			return nil, time.Time{}, BucketReplicationConfigNotFound{Bucket: bucket}
		}
-		return nil, err
+		return nil, time.Time{}, err
	}

	if meta.replicationConfig == nil {
-		return nil, BucketReplicationConfigNotFound{Bucket: bucket}
+		return nil, time.Time{}, BucketReplicationConfigNotFound{Bucket: bucket}
	}
-	return meta.replicationConfig, nil
+	return meta.replicationConfig, meta.ReplicationConfigUpdatedAt, nil
}

// GetBucketTargetsConfig returns configured bucket targets for this bucket
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetBucketTargetsConfig(bucket string) (*madmin.BucketTargets, error) {
-	meta, err := sys.GetConfig(bucket)
+	meta, err := sys.GetConfig(GlobalContext, bucket)
	if err != nil {
		return nil, err
	}
@@ -377,7 +393,7 @@ func (sys *BucketMetadataSys) GetBucketTarget(bucket string, arn string) (madmin

// GetConfig returns a specific configuration from the bucket metadata.
// The returned object may not be modified.
-func (sys *BucketMetadataSys) GetConfig(bucket string) (BucketMetadata, error) {
+func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (BucketMetadata, error) {
	objAPI := newObjectLayerFn()
	if objAPI == nil {
		return newBucketMetadata(bucket), errServerNotInitialized

@@ -397,7 +413,7 @@ func (sys *BucketMetadataSys) GetConfig(bucket string) (BucketMetadata, error) {
	if ok {
		return meta, nil
	}
-	meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
+	meta, err := loadBucketMetadata(ctx, objAPI, bucket)
	if err != nil {
		return meta, err
	}

@@ -434,6 +450,7 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
		_, _ = objAPI.HealBucket(ctx, buckets[index].Name, madmin.HealOpts{
			// Ensure heal opts for bucket metadata be deep healed all the time.
			ScanMode: madmin.HealDeepScan,
+			Recreate: true,
		})
		meta, err := loadBucketMetadata(ctx, objAPI, buckets[index].Name)
		if err != nil {
@@ -81,6 +81,12 @@ type BucketMetadata struct {
	ReplicationConfigXML        []byte
	BucketTargetsConfigJSON     []byte
	BucketTargetsConfigMetaJSON []byte
+	PolicyConfigUpdatedAt       time.Time
+	ObjectLockConfigUpdatedAt   time.Time
+	EncryptionConfigUpdatedAt   time.Time
+	TaggingConfigUpdatedAt      time.Time
+	QuotaConfigUpdatedAt        time.Time
+	ReplicationConfigUpdatedAt  time.Time

	// Unexported fields. Must be updated atomically.
	policyConfig *policy.Policy

@@ -98,9 +104,10 @@ type BucketMetadata struct {

// newBucketMetadata creates BucketMetadata with the supplied name and Created to Now.
func newBucketMetadata(name string) BucketMetadata {
+	now := UTCNow()
	return BucketMetadata{
		Name:    name,
-		Created: UTCNow(),
+		Created: now,
		notificationConfig: &event.Config{
			XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/",
		},

@@ -120,7 +127,7 @@ func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string)
		logger.LogIf(ctx, errors.New("bucket name cannot be empty"))
		return errors.New("bucket name cannot be empty")
	}
-	configFile := path.Join(bucketConfigPrefix, name, bucketMetadataFile)
+	configFile := path.Join(bucketMetaPrefix, name, bucketMetadataFile)
	data, err := readConfig(ctx, api, configFile)
	if err != nil {
		return err

@@ -157,8 +164,13 @@ func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket strin
	if err := b.convertLegacyConfigs(ctx, objectAPI); err != nil {
		return b, err
	}

	// migrate unencrypted remote targets
-	return b, b.migrateTargetConfig(ctx, objectAPI)
+	if err = b.migrateTargetConfig(ctx, objectAPI); err != nil {
+		return b, err
+	}
+	b.defaultTimestamps()
+	return b, nil
}

// parseAllConfigs will parse all configs and populate the private fields.

@@ -277,7 +289,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
	}

	for _, legacyFile := range legacyConfigs {
-		configFile := path.Join(bucketConfigPrefix, b.Name, legacyFile)
+		configFile := path.Join(bucketMetaPrefix, b.Name, legacyFile)

		configData, err := readConfig(ctx, objectAPI, configFile)
		if err != nil {

@@ -338,7 +350,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
	}

	for legacyFile := range configs {
-		configFile := path.Join(bucketConfigPrefix, b.Name, legacyFile)
+		configFile := path.Join(bucketMetaPrefix, b.Name, legacyFile)
		if err := deleteConfig(ctx, objectAPI, configFile); err != nil && !errors.Is(err, errConfigNotFound) {
			logger.LogIf(ctx, err)
		}
@@ -347,6 +359,32 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
	return nil
}

+// default timestamps to metadata Created timestamp if unset.
+func (b *BucketMetadata) defaultTimestamps() {
+	if b.PolicyConfigUpdatedAt.IsZero() {
+		b.PolicyConfigUpdatedAt = b.Created
+	}
+	if b.EncryptionConfigUpdatedAt.IsZero() {
+		b.EncryptionConfigUpdatedAt = b.Created
+	}
+
+	if b.TaggingConfigUpdatedAt.IsZero() {
+		b.TaggingConfigUpdatedAt = b.Created
+	}
+
+	if b.ObjectLockConfigUpdatedAt.IsZero() {
+		b.ObjectLockConfigUpdatedAt = b.Created
+	}
+
+	if b.QuotaConfigUpdatedAt.IsZero() {
+		b.QuotaConfigUpdatedAt = b.Created
+	}
+
+	if b.ReplicationConfigUpdatedAt.IsZero() {
+		b.ReplicationConfigUpdatedAt = b.Created
+	}
+}
+
// Save config to supplied ObjectLayer api.
func (b *BucketMetadata) Save(ctx context.Context, api ObjectLayer) error {
	if err := b.parseAllConfigs(ctx, api); err != nil {

@@ -365,7 +403,7 @@ func (b *BucketMetadata) Save(ctx context.Context, api ObjectLayer) error {
		return err
	}

-	configFile := path.Join(bucketConfigPrefix, b.Name, bucketMetadataFile)
+	configFile := path.Join(bucketMetaPrefix, b.Name, bucketMetadataFile)
	return saveConfig(ctx, api, configFile, data)
}

@@ -375,9 +413,10 @@ func deleteBucketMetadata(ctx context.Context, obj objectDeleter, bucket string)
	metadataFiles := []string{
		dataUsageCacheName,
		bucketMetadataFile,
+		path.Join(replicationDir, resyncFileName),
	}
	for _, metaFile := range metadataFiles {
-		configFile := path.Join(bucketConfigPrefix, bucket, metaFile)
+		configFile := path.Join(bucketMetaPrefix, bucket, metaFile)
		if err := deleteConfig(ctx, obj, configFile); err != nil && err != errConfigNotFound {
			return err
		}
@@ -108,6 +108,42 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {
				err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON")
				return
			}
+		case "PolicyConfigUpdatedAt":
+			z.PolicyConfigUpdatedAt, err = dc.ReadTime()
+			if err != nil {
+				err = msgp.WrapError(err, "PolicyConfigUpdatedAt")
+				return
+			}
+		case "ObjectLockConfigUpdatedAt":
+			z.ObjectLockConfigUpdatedAt, err = dc.ReadTime()
+			if err != nil {
+				err = msgp.WrapError(err, "ObjectLockConfigUpdatedAt")
+				return
+			}
+		case "EncryptionConfigUpdatedAt":
+			z.EncryptionConfigUpdatedAt, err = dc.ReadTime()
+			if err != nil {
+				err = msgp.WrapError(err, "EncryptionConfigUpdatedAt")
+				return
+			}
+		case "TaggingConfigUpdatedAt":
+			z.TaggingConfigUpdatedAt, err = dc.ReadTime()
+			if err != nil {
+				err = msgp.WrapError(err, "TaggingConfigUpdatedAt")
+				return
+			}
+		case "QuotaConfigUpdatedAt":
+			z.QuotaConfigUpdatedAt, err = dc.ReadTime()
+			if err != nil {
+				err = msgp.WrapError(err, "QuotaConfigUpdatedAt")
+				return
+			}
+		case "ReplicationConfigUpdatedAt":
+			z.ReplicationConfigUpdatedAt, err = dc.ReadTime()
+			if err != nil {
+				err = msgp.WrapError(err, "ReplicationConfigUpdatedAt")
+				return
+			}
		default:
			err = dc.Skip()
			if err != nil {
@@ -121,9 +157,9 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {

// EncodeMsg implements msgp.Encodable
func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
-	// map header, size 14
+	// map header, size 20
	// write "Name"
-	err = en.Append(0x8e, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
+	err = en.Append(0xde, 0x0, 0x14, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
	if err != nil {
		return
	}

@@ -262,15 +298,75 @@ func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
		err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON")
		return
	}
+	// write "PolicyConfigUpdatedAt"
+	err = en.Append(0xb5, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+	if err != nil {
+		return
+	}
+	err = en.WriteTime(z.PolicyConfigUpdatedAt)
+	if err != nil {
+		err = msgp.WrapError(err, "PolicyConfigUpdatedAt")
+		return
+	}
+	// write "ObjectLockConfigUpdatedAt"
+	err = en.Append(0xb9, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+	if err != nil {
+		return
+	}
+	err = en.WriteTime(z.ObjectLockConfigUpdatedAt)
+	if err != nil {
+		err = msgp.WrapError(err, "ObjectLockConfigUpdatedAt")
+		return
+	}
+	// write "EncryptionConfigUpdatedAt"
+	err = en.Append(0xb9, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+	if err != nil {
+		return
+	}
+	err = en.WriteTime(z.EncryptionConfigUpdatedAt)
+	if err != nil {
+		err = msgp.WrapError(err, "EncryptionConfigUpdatedAt")
+		return
+	}
+	// write "TaggingConfigUpdatedAt"
+	err = en.Append(0xb6, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+	if err != nil {
+		return
+	}
+	err = en.WriteTime(z.TaggingConfigUpdatedAt)
+	if err != nil {
+		err = msgp.WrapError(err, "TaggingConfigUpdatedAt")
+		return
+	}
+	// write "QuotaConfigUpdatedAt"
+	err = en.Append(0xb4, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+	if err != nil {
+		return
+	}
+	err = en.WriteTime(z.QuotaConfigUpdatedAt)
+	if err != nil {
+		err = msgp.WrapError(err, "QuotaConfigUpdatedAt")
+		return
+	}
+	// write "ReplicationConfigUpdatedAt"
+	err = en.Append(0xba, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+	if err != nil {
+		return
+	}
+	err = en.WriteTime(z.ReplicationConfigUpdatedAt)
+	if err != nil {
+		err = msgp.WrapError(err, "ReplicationConfigUpdatedAt")
+		return
+	}
	return
}

// MarshalMsg implements msgp.Marshaler
func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
-	// map header, size 14
+	// map header, size 20
	// string "Name"
-	o = append(o, 0x8e, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
+	o = append(o, 0xde, 0x0, 0x14, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
	o = msgp.AppendString(o, z.Name)
	// string "Created"
	o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)

@@ -311,6 +407,24 @@ func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
	// string "BucketTargetsConfigMetaJSON"
	o = append(o, 0xbb, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x4a, 0x53, 0x4f, 0x4e)
	o = msgp.AppendBytes(o, z.BucketTargetsConfigMetaJSON)
+	// string "PolicyConfigUpdatedAt"
+	o = append(o, 0xb5, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+	o = msgp.AppendTime(o, z.PolicyConfigUpdatedAt)
+	// string "ObjectLockConfigUpdatedAt"
+	o = append(o, 0xb9, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+	o = msgp.AppendTime(o, z.ObjectLockConfigUpdatedAt)
+	// string "EncryptionConfigUpdatedAt"
+	o = append(o, 0xb9, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+	o = msgp.AppendTime(o, z.EncryptionConfigUpdatedAt)
+	// string "TaggingConfigUpdatedAt"
+	o = append(o, 0xb6, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+	o = msgp.AppendTime(o, z.TaggingConfigUpdatedAt)
+	// string "QuotaConfigUpdatedAt"
+	o = append(o, 0xb4, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+	o = msgp.AppendTime(o, z.QuotaConfigUpdatedAt)
+	// string "ReplicationConfigUpdatedAt"
+	o = append(o, 0xba, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
+	o = msgp.AppendTime(o, z.ReplicationConfigUpdatedAt)
	return
}
@@ -416,6 +530,42 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {
			err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON")
			return
		}
+	case "PolicyConfigUpdatedAt":
+		z.PolicyConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
+		if err != nil {
+			err = msgp.WrapError(err, "PolicyConfigUpdatedAt")
+			return
+		}
+	case "ObjectLockConfigUpdatedAt":
+		z.ObjectLockConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
+		if err != nil {
+			err = msgp.WrapError(err, "ObjectLockConfigUpdatedAt")
+			return
+		}
+	case "EncryptionConfigUpdatedAt":
+		z.EncryptionConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
+		if err != nil {
+			err = msgp.WrapError(err, "EncryptionConfigUpdatedAt")
+			return
+		}
+	case "TaggingConfigUpdatedAt":
+		z.TaggingConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
+		if err != nil {
+			err = msgp.WrapError(err, "TaggingConfigUpdatedAt")
+			return
+		}
+	case "QuotaConfigUpdatedAt":
+		z.QuotaConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
+		if err != nil {
+			err = msgp.WrapError(err, "QuotaConfigUpdatedAt")
+			return
+		}
+	case "ReplicationConfigUpdatedAt":
+		z.ReplicationConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
+		if err != nil {
+			err = msgp.WrapError(err, "ReplicationConfigUpdatedAt")
+			return
+		}
	default:
		bts, err = msgp.Skip(bts)
		if err != nil {

@@ -430,6 +580,6 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketMetadata) Msgsize() (s int) {
-	s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + 21 + msgp.BytesPrefixSize + len(z.ReplicationConfigXML) + 24 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigJSON) + 28 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigMetaJSON)
+	s = 3 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + 21 + msgp.BytesPrefixSize + len(z.ReplicationConfigXML) + 24 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigJSON) + 28 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigMetaJSON) + 22 + msgp.TimeSize + 26 + msgp.TimeSize + 26 + msgp.TimeSize + 23 + msgp.TimeSize + 21 + msgp.TimeSize + 27 + msgp.TimeSize
	return
}
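Editor's note on the generated msgp changes above: msgpack encodes a map of up to 15 pairs with a single fixmap byte (0x80|N), so 14 fields serialized as 0x8e; 20 fields need the 3-byte map16 header, 0xde followed by a big-endian uint16 count (0x00 0x14). That is also why Msgsize's leading constant moves from 1 to 3. A tiny self-contained check of the arithmetic:

	package main

	import "fmt"

	func main() {
		fmt.Printf("fixmap(14): 0x%x\n", 0x80|14)                        // 0x8e
		fmt.Printf("map16(20):  0xde 0x%02x 0x%02x\n", 20>>8, 20&0xff)   // 0xde 0x00 0x14
	}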
@@ -30,7 +30,6 @@ import (
)

const (
-	bucketConfigPrefix       = "buckets"
	bucketNotificationConfig = "notification.xml"
)

@@ -72,8 +71,8 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
-	config.SetRegion(globalServerRegion)
-	if err = config.Validate(globalServerRegion, globalNotificationSys.targetList); err != nil {
+	config.SetRegion(globalSite.Region)
+	if err = config.Validate(globalSite.Region, globalNotificationSys.targetList); err != nil {
		arnErr, ok := err.(*event.ErrARNNotFound)
		if ok {
			for i, queue := range config.QueueList {

@@ -145,7 +144,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
		return
	}

-	config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalServerRegion, globalNotificationSys.targetList)
+	config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalSite.Region, globalNotificationSys.targetList)
	if err != nil {
		apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
		if event.IsEventError(err) {

@@ -161,7 +160,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
		return
	}

-	if err = globalBucketMetadataSys.Update(bucketName, bucketNotificationConfig, configData); err != nil {
+	if err = globalBucketMetadataSys.Update(ctx, bucketName, bucketNotificationConfig, configData); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
@@ -44,7 +44,7 @@ func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention,
		return r, nil
	}

-	config, err := globalBucketMetadataSys.GetObjectLockConfig(bucketName)
+	config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucketName)
	if err != nil {
		if _, ok := err.(BucketObjectLockConfigNotFound); ok {
			return r, nil
@@ -58,6 +58,10 @@ func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention,
// enforceRetentionForDeletion checks if it is appropriate to remove an
// object according to locking configuration when lifecycle or bucket quota asks for the deletion.
func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locked bool) {
+	if objInfo.DeleteMarker {
+		return false
+	}
+
	lhold := objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
	if lhold.Status.Valid() && lhold.Status == objectlock.LegalHoldOn {
		return true
@@ -175,68 +179,76 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke
// For objects in "Governance" mode, overwrite is allowed if a) object retention date is past OR
// governance bypass headers are set and user has governance bypass permissions.
// Objects in compliance mode can be overwritten only if retention date is being extended. No mode change is permitted.
-func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, objRetention *objectlock.ObjectRetention, cred auth.Credentials, owner bool) (ObjectInfo, APIErrorCode) {
+func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, oi ObjectInfo, objRetention *objectlock.ObjectRetention, cred auth.Credentials, owner bool) error {
	byPassSet := objectlock.IsObjectLockGovernanceBypassSet(r.Header)
-	opts, err := getOpts(ctx, r, bucket, object)
-	if err != nil {
-		return ObjectInfo{}, toAPIErrorCode(ctx, err)
-	}
-
-	oi, err := getObjectInfoFn(ctx, bucket, object, opts)
-	if err != nil {
-		return oi, toAPIErrorCode(ctx, err)
-	}
-
	t, err := objectlock.UTCNowNTP()
	if err != nil {
		logger.LogIf(ctx, err)
-		return oi, ErrObjectLocked
+		return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID}
	}

-	// Pass in relative days from current time, to additionally verify the "object-lock-remaining-retention-days" policy, if any.
+	// Pass in relative days from current time, to additionally
+	// verify the "object-lock-remaining-retention-days" policy, if any.
	days := int(math.Ceil(math.Abs(objRetention.RetainUntilDate.Sub(t).Hours()) / 24))

	ret := objectlock.GetObjectRetentionMeta(oi.UserDefined)
	if ret.Mode.Valid() {
		// Retention has expired; you may change whatever you like.
		if ret.RetainUntilDate.Before(t) {
-			perm := isPutRetentionAllowed(bucket, object,
+			apiErr := isPutRetentionAllowed(oi.Bucket, oi.Name,
				days, objRetention.RetainUntilDate.Time,
				objRetention.Mode, byPassSet, r, cred,
				owner)
-			return oi, perm
+			switch apiErr {
+			case ErrAccessDenied:
+				return errAuthentication
+			}
+			return nil
		}

		switch ret.Mode {
		case objectlock.RetGovernance:
-			govPerm := isPutRetentionAllowed(bucket, object, days,
+			govPerm := isPutRetentionAllowed(oi.Bucket, oi.Name, days,
				objRetention.RetainUntilDate.Time, objRetention.Mode,
				byPassSet, r, cred, owner)
			// Governance mode retention period cannot be shortened if x-amz-bypass-governance is not set.
			if !byPassSet {
				if objRetention.Mode != objectlock.RetGovernance || objRetention.RetainUntilDate.Before((ret.RetainUntilDate.Time)) {
-					return oi, ErrObjectLocked
+					return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID}
				}
			}
-			return oi, govPerm
+			switch govPerm {
+			case ErrAccessDenied:
+				return errAuthentication
+			}
+			return nil
		case objectlock.RetCompliance:
			// Compliance retention mode cannot be changed or shortened.
			// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes
			if objRetention.Mode != objectlock.RetCompliance || objRetention.RetainUntilDate.Before((ret.RetainUntilDate.Time)) {
-				return oi, ErrObjectLocked
+				return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID}
			}
-			compliancePerm := isPutRetentionAllowed(bucket, object,
+			apiErr := isPutRetentionAllowed(oi.Bucket, oi.Name,
				days, objRetention.RetainUntilDate.Time, objRetention.Mode,
				false, r, cred, owner)
-			return oi, compliancePerm
+			switch apiErr {
+			case ErrAccessDenied:
+				return errAuthentication
+			}
+			return nil
		}
-		return oi, ErrNone
+		return nil
	} // No pre-existing retention metadata present.

-	perm := isPutRetentionAllowed(bucket, object,
+	apiErr := isPutRetentionAllowed(oi.Bucket, oi.Name,
		days, objRetention.RetainUntilDate.Time,
		objRetention.Mode, byPassSet, r, cred, owner)
-	return oi, perm
+	switch apiErr {
+	case ErrAccessDenied:
+		return errAuthentication
+	}
+	return nil
}
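The rewrite above also changes the function's contract: instead of threading `(ObjectInfo, APIErrorCode)` back to the caller, it returns a plain `error`, with typed values such as `ObjectLocked` carrying bucket/object/version context and `errAuthentication` standing in for access denial. A hedged sketch of a caller consuming that contract (the handler shape here is illustrative, not lifted from this diff; it assumes the standard "errors" package is imported):

// Illustrative caller of the new error-typed contract.
if err := enforceRetentionBypassForPut(ctx, r, oi, objRetention, cred, owner); err != nil {
	var locked ObjectLocked
	if errors.As(err, &locked) {
		// Retention/legal-hold violation: report it as an object-locked API error.
		writeErrorResponse(ctx, w, toAPIError(ctx, locked), r.URL)
		return
	}
	// Anything else (e.g. errAuthentication) maps through the generic path.
	writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
	return
}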
// checkPutObjectLockAllowed enforces object retention policy and legal hold policy

@@ -103,7 +103,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
		return
	}

-	if err = globalBucketMetadataSys.Update(bucket, bucketPolicyConfig, configData); err != nil {
+	if err = globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, configData); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
@@ -148,7 +148,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
		return
	}

-	if err := globalBucketMetadataSys.Update(bucket, bucketPolicyConfig, nil); err != nil {
+	if err := globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, nil); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
@@ -37,60 +37,72 @@ import (
func getAnonReadOnlyBucketPolicy(bucketName string) *policy.Policy {
	return &policy.Policy{
		Version: policy.DefaultVersion,
-		Statements: []policy.Statement{policy.NewStatement(
-			policy.Allow,
-			policy.NewPrincipal("*"),
-			policy.NewActionSet(policy.GetBucketLocationAction, policy.ListBucketAction),
-			policy.NewResourceSet(policy.NewResource(bucketName, "")),
-			condition.NewFunctions(),
-		)},
+		Statements: []policy.Statement{
+			policy.NewStatement(
+				"",
+				policy.Allow,
+				policy.NewPrincipal("*"),
+				policy.NewActionSet(policy.GetBucketLocationAction, policy.ListBucketAction),
+				policy.NewResourceSet(policy.NewResource(bucketName, "")),
+				condition.NewFunctions(),
+			),
+		},
	}
}

func getAnonWriteOnlyBucketPolicy(bucketName string) *policy.Policy {
	return &policy.Policy{
		Version: policy.DefaultVersion,
-		Statements: []policy.Statement{policy.NewStatement(
-			policy.Allow,
-			policy.NewPrincipal("*"),
-			policy.NewActionSet(
-				policy.GetBucketLocationAction,
-				policy.ListBucketMultipartUploadsAction,
-			),
-			policy.NewResourceSet(policy.NewResource(bucketName, "")),
-			condition.NewFunctions(),
-		)},
+		Statements: []policy.Statement{
+			policy.NewStatement(
+				"",
+				policy.Allow,
+				policy.NewPrincipal("*"),
+				policy.NewActionSet(
+					policy.GetBucketLocationAction,
+					policy.ListBucketMultipartUploadsAction,
+				),
+				policy.NewResourceSet(policy.NewResource(bucketName, "")),
+				condition.NewFunctions(),
+			),
+		},
	}
}

func getAnonReadOnlyObjectPolicy(bucketName, prefix string) *policy.Policy {
	return &policy.Policy{
		Version: policy.DefaultVersion,
-		Statements: []policy.Statement{policy.NewStatement(
-			policy.Allow,
-			policy.NewPrincipal("*"),
-			policy.NewActionSet(policy.GetObjectAction),
-			policy.NewResourceSet(policy.NewResource(bucketName, prefix)),
-			condition.NewFunctions(),
-		)},
+		Statements: []policy.Statement{
+			policy.NewStatement(
+				"",
+				policy.Allow,
+				policy.NewPrincipal("*"),
+				policy.NewActionSet(policy.GetObjectAction),
+				policy.NewResourceSet(policy.NewResource(bucketName, prefix)),
+				condition.NewFunctions(),
+			),
+		},
	}
}

func getAnonWriteOnlyObjectPolicy(bucketName, prefix string) *policy.Policy {
	return &policy.Policy{
		Version: policy.DefaultVersion,
-		Statements: []policy.Statement{policy.NewStatement(
-			policy.Allow,
-			policy.NewPrincipal("*"),
-			policy.NewActionSet(
-				policy.AbortMultipartUploadAction,
-				policy.DeleteObjectAction,
-				policy.ListMultipartUploadPartsAction,
-				policy.PutObjectAction,
-			),
-			policy.NewResourceSet(policy.NewResource(bucketName, prefix)),
-			condition.NewFunctions(),
-		)},
+		Statements: []policy.Statement{
+			policy.NewStatement(
+				"",
+				policy.Allow,
+				policy.NewPrincipal("*"),
+				policy.NewActionSet(
+					policy.AbortMultipartUploadAction,
+					policy.DeleteObjectAction,
+					policy.ListMultipartUploadPartsAction,
+					policy.PutObjectAction,
+				),
+				policy.NewResourceSet(policy.NewResource(bucketName, prefix)),
+				condition.NewFunctions(),
+			),
+		},
	}
}
@@ -101,11 +113,12 @@ func TestCreateBucket(t *testing.T) {

// testCreateBucket - Test for calling Create Bucket and ensure we get one and only one success.
func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
	bucketName1 := fmt.Sprintf("%s-1", bucketName)

	const n = 100
-	var start = make(chan struct{})
+	start := make(chan struct{})
	var ok, errs int
	var wg sync.WaitGroup
	var mu sync.Mutex
@@ -147,8 +160,8 @@ func TestPutBucketPolicyHandler(t *testing.T) {

// testPutBucketPolicyHandler - Test for Bucket policy end point.
func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
-
+	credentials auth.Credentials, t *testing.T,
+) {
	bucketName1 := fmt.Sprintf("%s-1", bucketName)
	if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, BucketOptions{}); err != nil {
		t.Fatal(err)
@@ -333,7 +346,6 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
	// create unsigned HTTP request for PutBucketPolicyHandler.
	anonReq, err := newTestRequest(http.MethodPut, getPutPolicyURL("", bucketName),
		int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)))
-
	if err != nil {
		t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
			instanceType, bucketName, err)
@@ -352,14 +364,12 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string

	nilReq, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", nilBucket),
		0, nil, "", "", nil)
-
	if err != nil {
		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
	}
	// execute the object layer set to `nil` test.
	// `ExecObjectLayerAPINilTest` manages the operation.
	ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
-
}
// Wrapper for calling Get Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
@@ -369,7 +379,8 @@ func TestGetBucketPolicyHandler(t *testing.T) {

// testGetBucketPolicyHandler - Test for end point which fetches the access policy json of the given bucket.
func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
	// template for constructing HTTP request body for PUT bucket policy.
	bucketPolicyTemplate := `{"Version":"2012-10-17","Statement":[{"Action":["s3:GetBucketLocation","s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s"]},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s/this*"]}]}`
@@ -465,7 +476,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
	// construct HTTP request for PUT bucket policy endpoint.
	reqV4, err := newTestSignedRequestV4(http.MethodGet, getGetPolicyURL("", testCase.bucketName),
		0, nil, testCase.accessKey, testCase.secretKey, nil)
-
	if err != nil {
		t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
	}
@@ -540,7 +550,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
	// Bucket policy related functions don't support anonymous requests, so setting policies shouldn't make a difference.
	// create unsigned HTTP request for PutBucketPolicyHandler.
	anonReq, err := newTestRequest(http.MethodGet, getPutPolicyURL("", bucketName), 0, nil)
-
	if err != nil {
		t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
			instanceType, bucketName, err)
@@ -559,7 +568,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string

	nilReq, err := newTestSignedRequestV4(http.MethodGet, getGetPolicyURL("", nilBucket),
		0, nil, "", "", nil)
-
	if err != nil {
		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
	}
@@ -575,8 +583,8 @@ func TestDeleteBucketPolicyHandler(t *testing.T) {

// testDeleteBucketPolicyHandler - Test for Delete bucket policy end point.
func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
-
+	credentials auth.Credentials, t *testing.T,
+) {
	// template for constructing HTTP request body for PUT bucket policy.
	bucketPolicyTemplate := `{
		"Version": "2012-10-17",
@@ -743,7 +751,6 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
	// Bucket policy related functions don't support anonymous requests, so setting policies shouldn't make a difference.
	// create unsigned HTTP request for PutBucketPolicyHandler.
	anonReq, err := newTestRequest(http.MethodDelete, getPutPolicyURL("", bucketName), 0, nil)
-
	if err != nil {
		t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
			instanceType, bucketName, err)
@@ -762,7 +769,6 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str

	nilReq, err := newTestSignedRequestV4(http.MethodDelete, getDeletePolicyURL("", nilBucket),
		0, nil, "", "", nil)
-
	if err != nil {
		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
	}
@@ -172,11 +172,12 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
		vStr, ok := v.(string)
		if ok {
			// Special case for AD/LDAP STS users
-			if k == ldapUser {
+			switch k {
+			case ldapUser:
				args["user"] = []string{vStr}
-			} else if k == ldapUserN {
+			case ldapUserN:
				args["username"] = []string{vStr}
-			} else {
+			default:
				args[k] = []string{vStr}
			}
		}
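The if/else chain becomes a switch purely for readability; behavior is unchanged. A standalone sketch of the same claim-to-condition mapping (the string keys below are assumed stand-ins for the ldapUser/ldapUserN constants in the diff):

package main

import "fmt"

// LDAP STS claims are exposed to policy conditions under "user"/"username";
// every other claim keeps its original key.
func conditionKey(k string) string {
	switch k {
	case "ldapUser": // hypothetical value of the ldapUser constant
		return "user"
	case "ldapUsername": // hypothetical value of the ldapUserN constant
		return "username"
	default:
		return k
	}
}

func main() {
	fmt.Println(conditionKey("ldapUser"), conditionKey("exp")) // user exp
}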
@@ -199,7 +200,7 @@ func PolicyToBucketAccessPolicy(bucketPolicy *policy.Policy) (*miniogopolicy.Buc
	}

	var policyInfo miniogopolicy.BucketAccessPolicy
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
	if err = json.Unmarshal(data, &policyInfo); err != nil {
		// This should not happen because data is valid JSON.
		return nil, err
@@ -217,7 +218,7 @@ func BucketAccessPolicyToPolicy(policyInfo *miniogopolicy.BucketAccessPolicy) (*
	}

	var bucketPolicy policy.Policy
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
	if err = json.Unmarshal(data, &bucketPolicy); err != nil {
		// This should not happen because data is valid JSON.
		return nil, err
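Both hunks only switch `var json = ...` to the short declaration form; the jsoniter configuration itself is unchanged. For reference, jsoniter's `ConfigCompatibleWithStandardLibrary` behaves as a drop-in for encoding/json, which is why the local variable can safely shadow the standard package name. A minimal self-contained example:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	json := jsoniter.ConfigCompatibleWithStandardLibrary // shadows encoding/json by convention
	var v struct {
		Name string `json:"name"`
	}
	if err := json.Unmarshal([]byte(`{"name":"bucket-policy"}`), &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Name) // bucket-policy
}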
@@ -20,11 +20,11 @@ package cmd
import (
	"context"
	"encoding/json"
+	"errors"
	"fmt"
	"time"

	"github.com/minio/madmin-go"
-	"github.com/minio/minio/internal/event"
	"github.com/minio/minio/internal/logger"
)
@@ -34,7 +34,7 @@ type BucketQuotaSys struct {
}

// Get - Get quota configuration.
-func (sys *BucketQuotaSys) Get(bucketName string) (*madmin.BucketQuota, error) {
+func (sys *BucketQuotaSys) Get(ctx context.Context, bucketName string) (*madmin.BucketQuota, error) {
	if globalIsGateway {
		objAPI := newObjectLayerFn()
		if objAPI == nil {
@@ -42,8 +42,8 @@ func (sys *BucketQuotaSys) Get(bucketName string) (*madmin.BucketQuota, error) {
		}
		return &madmin.BucketQuota{}, nil
	}

-	return globalBucketMetadataSys.GetQuotaConfig(bucketName)
+	qCfg, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucketName)
+	return qCfg, err
}
// NewBucketQuotaSys returns initialized BucketQuotaSys
@@ -51,6 +51,35 @@ func NewBucketQuotaSys() *BucketQuotaSys {
	return &BucketQuotaSys{}
}

+// Init initializes bucket quota.
+func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
+	sys.bucketStorageCache.Once.Do(func() {
+		sys.bucketStorageCache.TTL = 1 * time.Second
+		sys.bucketStorageCache.Update = func() (interface{}, error) {
+			ctx, done := context.WithTimeout(context.Background(), 5*time.Second)
+			defer done()
+
+			return loadDataUsageFromBackend(ctx, objAPI)
+		}
+	})
+}
+
+// GetBucketUsageInfo returns bucket usage info for a given bucket
+func (sys *BucketQuotaSys) GetBucketUsageInfo(bucket string) (BucketUsageInfo, error) {
+	v, err := sys.bucketStorageCache.Get()
+	if err != nil {
+		return BucketUsageInfo{}, err
+	}
+
+	dui, ok := v.(DataUsageInfo)
+	if !ok {
+		return BucketUsageInfo{}, fmt.Errorf("internal error: Unexpected DUI data type: %T", v)
+	}
+
+	bui := dui.BucketsUsage[bucket]
+	return bui, nil
+}
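Init moves the lazy cache wiring out of the request path: the Once/TTL/Update trio behaves like a single-flight TTL cache over loadDataUsageFromBackend, so quota checks read data usage that is at most about a second stale instead of hitting the backend each time. A minimal generic sketch of that caching shape (a hypothetical helper, not MinIO's internal cache type; assumes "sync" and "time" are imported):

// Hypothetical TTL cache: Get serves the cached value until ttl
// expires, then calls update once and caches the fresh result.
type ttlCache struct {
	mu     sync.Mutex
	ttl    time.Duration
	last   time.Time
	value  interface{}
	update func() (interface{}, error)
}

func (c *ttlCache) Get() (interface{}, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.value != nil && time.Since(c.last) < c.ttl {
		return c.value, nil // still fresh, skip the expensive reload
	}
	v, err := c.update()
	if err != nil {
		return nil, err
	}
	c.value, c.last = v, time.Now()
	return v, nil
}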
// parseBucketQuota parses BucketQuota from json
func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota, err error) {
	quotaCfg = &madmin.BucketQuota{}
@@ -58,50 +87,32 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota,
		return quotaCfg, err
	}
	if !quotaCfg.IsValid() {
+		if quotaCfg.Type == "fifo" {
+			logger.LogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc admin bucket quota alias/bucket --clear' and use 'mc ilm add' for expiration of objects"))
+			return quotaCfg, nil
+		}
		return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg)
	}
	return
}
-func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64) error {
-	objAPI := newObjectLayerFn()
-	if objAPI == nil {
-		return errServerNotInitialized
+func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string, size int64) error {
+	if size < 0 {
+		return nil
	}

-	sys.bucketStorageCache.Once.Do(func() {
-		sys.bucketStorageCache.TTL = 1 * time.Second
-		sys.bucketStorageCache.Update = func() (interface{}, error) {
-			ctx, done := context.WithTimeout(context.Background(), 5*time.Second)
-			defer done()
-			return loadDataUsageFromBackend(ctx, objAPI)
-		}
-	})
-
-	q, err := sys.Get(bucket)
+	q, err := sys.Get(ctx, bucket)
	if err != nil {
		return err
	}

	if q != nil && q.Type == madmin.HardQuota && q.Quota > 0 {
-		v, err := sys.bucketStorageCache.Get()
+		bui, err := sys.GetBucketUsageInfo(bucket)
		if err != nil {
			return err
		}

-		dui, ok := v.(DataUsageInfo)
-		if !ok {
-			return fmt.Errorf("internal error: Unexpected DUI data type: %T", v)
-		}
-
-		bui, ok := dui.BucketsUsage[bucket]
-		if !ok {
-			// bucket not found, cannot enforce quota
-			// call will fail anyways later.
-			return nil
-		}
-
-		if (bui.Size + uint64(size)) >= q.Quota {
+		if bui.Size > 0 && ((bui.Size + uint64(size)) >= q.Quota) {
			return BucketQuotaExceeded{Bucket: bucket}
		}
	}
@@ -109,120 +120,9 @@ func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64)
	return nil
}

-func enforceBucketQuota(ctx context.Context, bucket string, size int64) error {
-	if size < 0 {
+func enforceBucketQuotaHard(ctx context.Context, bucket string, size int64) error {
+	if globalBucketQuotaSys == nil {
		return nil
	}

-	return globalBucketQuotaSys.check(ctx, bucket, size)
-}
-
-// enforceFIFOQuota deletes objects in FIFO order until sufficient objects
-// have been deleted so as to bring bucket usage within quota.
-func enforceFIFOQuotaBucket(ctx context.Context, objectAPI ObjectLayer, bucket string, bui BucketUsageInfo) {
-	// Check if the current bucket has quota restrictions, if not skip it
-	cfg, err := globalBucketQuotaSys.Get(bucket)
-	if err != nil {
-		return
-	}
-
-	if cfg.Type != madmin.FIFOQuota {
-		return
-	}
-
-	var toFree uint64
-	if bui.Size > cfg.Quota && cfg.Quota > 0 {
-		toFree = bui.Size - cfg.Quota
-	}
-
-	if toFree <= 0 {
-		return
-	}
-
-	// Allocate new results channel to receive ObjectInfo.
-	objInfoCh := make(chan ObjectInfo)
-
-	versioned := globalBucketVersioningSys.Enabled(bucket)
-
-	// Walk through all objects
-	if err := objectAPI.Walk(ctx, bucket, "", objInfoCh, ObjectOptions{WalkVersions: versioned}); err != nil {
-		logger.LogIf(ctx, err)
-		return
-	}
-
-	// reuse the fileScorer used by disk cache to score entries by
-	// ModTime to find the oldest objects in bucket to delete. In
-	// the context of bucket quota enforcement - number of hits are
-	// irrelevant.
-	scorer, err := newFileScorer(toFree, time.Now().Unix(), 1)
-	if err != nil {
-		logger.LogIf(ctx, err)
-		return
-	}
-
-	rcfg, _ := globalBucketObjectLockSys.Get(bucket)
-	for obj := range objInfoCh {
-		if obj.DeleteMarker {
-			// Delete markers are automatically added for FIFO purge.
-			scorer.addFileWithObjInfo(obj, 1)
-			continue
-		}
-		// skip objects currently under retention
-		if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
-			continue
-		}
-		scorer.addFileWithObjInfo(obj, 1)
-	}
-
-	// If we saw less than quota we are good.
-	if scorer.seenBytes <= cfg.Quota {
-		return
-	}
-	// Calculate how much we want to delete now.
-	toFreeNow := scorer.seenBytes - cfg.Quota
-	// We were less over quota than we thought. Adjust so we delete less.
-	// If we are more over, leave it for the next run to pick up.
-	if toFreeNow < toFree {
-		if !scorer.adjustSaveBytes(int64(toFreeNow) - int64(toFree)) {
-			// We got below or at quota.
-			return
-		}
-	}
-
-	var objects []ObjectToDelete
-	numKeys := len(scorer.fileObjInfos())
-	for i, obj := range scorer.fileObjInfos() {
-		objects = append(objects, ObjectToDelete{
-			ObjectName: obj.Name,
-			VersionID:  obj.VersionID,
-		})
-		if len(objects) < maxDeleteList && (i < numKeys-1) {
-			// skip deletion until maxDeleteList or end of slice
-			continue
-		}
-
-		if len(objects) == 0 {
-			break
-		}
-
-		// Deletes a list of objects.
-		_, deleteErrs := objectAPI.DeleteObjects(ctx, bucket, objects, ObjectOptions{
-			Versioned: versioned,
-		})
-		for i := range deleteErrs {
-			if deleteErrs[i] != nil {
-				logger.LogIf(ctx, deleteErrs[i])
-				continue
-			}
-
-			// Notify object deleted event.
-			sendEvent(eventArgs{
-				EventName:  event.ObjectRemovedDelete,
-				BucketName: bucket,
-				Object:     obj,
-				Host:       "Internal: [FIFO-QUOTA-EXPIRY]",
-			})
-		}
-		objects = nil
-	}
+	return globalBucketQuotaSys.enforceQuotaHard(ctx, bucket, size)
}
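With FIFO quotas removed, enforcement reduces to a pure admission check: a PUT of `size` bytes is rejected when known usage plus the incoming size would reach the configured hard quota, and is waved through when usage is unknown (`bui.Size == 0`) or the size is negative. A standalone restatement of that predicate with an assumed worked example:

// Worked example of the hard-quota check above (numbers assumed):
//   quota = 100 MiB, current usage = 96 MiB, incoming PUT = 5 MiB
//   96 MiB + 5 MiB >= 100 MiB  ->  BucketQuotaExceeded
func wouldExceedHardQuota(used, quota uint64, size int64) bool {
	if size < 0 || quota == 0 || used == 0 {
		return false // negative sizes and unknown usage are never rejected
	}
	return used+uint64(size) >= quota
}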
@@ -20,7 +20,6 @@ package cmd
import (
	"context"
	"sync"
-	"sync/atomic"
	"time"

	"github.com/minio/minio/internal/bucket/replication"
@@ -52,8 +51,10 @@ func (r *ReplicationStats) Delete(bucket string) {
	r.Lock()
	defer r.Unlock()
	delete(r.Cache, bucket)
-	delete(r.UsageCache, bucket)
+
+	r.ulock.Lock()
+	defer r.ulock.Unlock()
+	delete(r.UsageCache, bucket)
}
// UpdateReplicaStat updates in-memory replica statistics with new values.
@@ -68,54 +69,56 @@ func (r *ReplicationStats) UpdateReplicaStat(bucket string, n int64) {
	if !ok {
		bs = &BucketReplicationStats{Stats: make(map[string]*BucketReplicationStat)}
	}
-	atomic.AddInt64(&bs.ReplicaSize, n)
+	bs.ReplicaSize += n
	r.Cache[bucket] = bs
}
// Update updates in-memory replication statistics with new values.
-func (r *ReplicationStats) Update(bucket string, arn string, n int64, status, prevStatus replication.StatusType, opType replication.Type) {
+func (r *ReplicationStats) Update(bucket string, arn string, n int64, duration time.Duration, status, prevStatus replication.StatusType, opType replication.Type) {
	if r == nil {
		return
	}
-	r.RLock()
+	r.Lock()
+	defer r.Unlock()

	bs, ok := r.Cache[bucket]
	if !ok {
		bs = &BucketReplicationStats{Stats: make(map[string]*BucketReplicationStat)}
+		r.Cache[bucket] = bs
	}
	b, ok := bs.Stats[arn]
	if !ok {
		b = &BucketReplicationStat{}
		bs.Stats[arn] = b
	}
-	r.RUnlock()
	switch status {
	case replication.Completed:
		switch prevStatus { // adjust counters based on previous state
		case replication.Failed:
-			atomic.AddInt64(&b.FailedCount, -1)
+			b.FailedCount--
		}
		if opType == replication.ObjectReplicationType {
-			atomic.AddInt64(&b.ReplicatedSize, n)
+			b.ReplicatedSize += n
			switch prevStatus {
			case replication.Failed:
-				atomic.AddInt64(&b.FailedSize, -1*n)
+				b.FailedSize -= n
			}
+			if duration > 0 {
+				b.Latency.update(n, duration)
+			}
		}
	case replication.Failed:
		if opType == replication.ObjectReplicationType {
			if prevStatus == replication.Pending {
-				atomic.AddInt64(&b.FailedSize, n)
-				atomic.AddInt64(&b.FailedCount, 1)
+				b.FailedSize += n
+				b.FailedCount++
			}
		}
	case replication.Replica:
		if opType == replication.ObjectReplicationType {
-			atomic.AddInt64(&b.ReplicaSize, n)
+			b.ReplicaSize += n
		}
	}
-	r.Lock()
-	bs.Stats[arn] = b
-	r.Cache[bucket] = bs
-	r.Unlock()
}
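The Update rewrite is as much about correctness as style: the old code looked up and mutated shared map entries under an RLock, used atomics on the fields, and then re-acquired the write lock, leaving a window between RUnlock and Lock where another goroutine could replace the same *BucketReplicationStat. Holding the write lock for the whole mutation makes the atomic operations unnecessary, which is why "sync/atomic" drops out of the imports above. The same fix in miniature (a hypothetical counter type, not MinIO's):

// One write lock around lookup-or-insert and mutation, instead of
// RLock + atomics + a second Lock.
type counters struct {
	mu sync.Mutex
	m  map[string]*int64
}

func (c *counters) add(key string, n int64) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.m == nil {
		c.m = make(map[string]*int64)
	}
	v, ok := c.m[key]
	if !ok {
		v = new(int64)
		c.m[key] = v
	}
	*v += n // safe: all access to c.m and *v happens under c.mu
}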
// GetInitialUsage gets replication metrics available at the time of cluster initialization
@@ -126,10 +129,10 @@ func (r *ReplicationStats) GetInitialUsage(bucket string) BucketReplicationStats
	r.ulock.RLock()
	defer r.ulock.RUnlock()
	st, ok := r.UsageCache[bucket]
-	if ok {
-		return st.Clone()
+	if !ok {
+		return BucketReplicationStats{}
	}
-	return BucketReplicationStats{Stats: make(map[string]*BucketReplicationStat)}
+	return st.Clone()
}

// Get replication metrics for a bucket from this node since this node came up.
@@ -143,7 +146,7 @@ func (r *ReplicationStats) Get(bucket string) BucketReplicationStats {

	st, ok := r.Cache[bucket]
	if !ok {
-		return BucketReplicationStats{Stats: make(map[string]*BucketReplicationStat)}
+		return BucketReplicationStats{}
	}
	return st.Clone()
}
@@ -160,41 +163,46 @@ func NewReplicationStats(ctx context.Context, objectAPI ObjectLayer) *Replicatio
func (r *ReplicationStats) loadInitialReplicationMetrics(ctx context.Context) {
	rTimer := time.NewTimer(time.Minute * 1)
	defer rTimer.Stop()
+	var (
+		dui DataUsageInfo
+		err error
+	)
+outer:
	for {
		select {
		case <-ctx.Done():
			return
		case <-rTimer.C:
-			dui, err := loadDataUsageFromBackend(GlobalContext, newObjectLayerFn())
+			dui, err = loadDataUsageFromBackend(GlobalContext, newObjectLayerFn())
			if err != nil {
				continue
			}
-			// data usage has not captured any data yet.
-			if dui.LastUpdate.IsZero() {
-				continue
+			// If LastUpdate is set, data usage is available.
+			if !dui.LastUpdate.IsZero() {
+				break outer
			}
-			m := make(map[string]*BucketReplicationStats)
-			for bucket, usage := range dui.BucketsUsage {
-				b := &BucketReplicationStats{
-					Stats: make(map[string]*BucketReplicationStat, len(usage.ReplicationInfo)),
-				}
-				for arn, uinfo := range usage.ReplicationInfo {
-					b.Stats[arn] = &BucketReplicationStat{
-						FailedSize:     int64(uinfo.ReplicationFailedSize),
-						ReplicatedSize: int64(uinfo.ReplicatedSize),
-						ReplicaSize:    int64(uinfo.ReplicaSize),
-						FailedCount:    int64(uinfo.ReplicationFailedCount),
-					}
-				}
-				b.ReplicaSize += int64(usage.ReplicaSize)
-				if b.hasReplicationUsage() {
-					m[bucket] = b
-				}
-			}
-			r.ulock.Lock()
-			defer r.ulock.Unlock()
-			r.UsageCache = m
-			return
		}
	}

+	m := make(map[string]*BucketReplicationStats)
+	for bucket, usage := range dui.BucketsUsage {
+		b := &BucketReplicationStats{
+			Stats: make(map[string]*BucketReplicationStat, len(usage.ReplicationInfo)),
+		}
+		for arn, uinfo := range usage.ReplicationInfo {
+			b.Stats[arn] = &BucketReplicationStat{
+				FailedSize:     int64(uinfo.ReplicationFailedSize),
+				ReplicatedSize: int64(uinfo.ReplicatedSize),
+				ReplicaSize:    int64(uinfo.ReplicaSize),
+				FailedCount:    int64(uinfo.ReplicationFailedCount),
+			}
+		}
+		b.ReplicaSize += int64(usage.ReplicaSize)
+		if b.hasReplicationUsage() {
+			m[bucket] = b
+		}
+	}
+	r.ulock.Lock()
+	defer r.ulock.Unlock()
+	r.UsageCache = m
}
@@ -20,12 +20,15 @@ package cmd
import (
	"bytes"
	"fmt"
	"net/http"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/minio/minio/internal/bucket/replication"
	xhttp "github.com/minio/minio/internal/http"
)

//go:generate msgp -file=$GOFILE
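The `//go:generate msgp -file=$GOFILE` directive is what produces the `*_gen.go` marshal/unmarshal methods and the `*_gen_test.go` roundtrip tests that appear later in this diff; after editing the annotated structs, the generated files are refreshed via `go generate`. Shown in context (the directive is verbatim from the diff; the surrounding comments are illustrative):

// In the source file that declares the msgp-annotated types:
//
//go:generate msgp -file=$GOFILE
//
// Running `go generate ./...` then rewrites the companion *_gen.go and
// *_gen_test.go files for types such as TargetReplicationResyncStatus.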
@@ -34,6 +37,7 @@ import (
type replicatedTargetInfo struct {
	Arn               string
	Size              int64
+	Duration          time.Duration
	ReplicationAction replicationAction // full or metadata only
	OpType            replication.Type  // whether incoming replication, existing object, healing etc..
	ReplicationStatus replication.StatusType
@@ -172,7 +176,7 @@ func (o *ObjectInfo) TargetReplicationStatus(arn string) (status replication.Sta
type replicateTargetDecision struct {
	Replicate   bool   // Replicate to this target
	Synchronous bool   // Synchronous replication configured.
-	Arn         string //ARN of replication target
+	Arn         string // ARN of replication target
	ID          string
}
@@ -501,8 +505,10 @@ func getHealReplicateObjectInfo(objInfo ObjectInfo, rcfg replicationConfig) Repl
	var tgtStatuses map[string]replication.StatusType
	if oi.DeleteMarker || !oi.VersionPurgeStatus.Empty() {
		dsc = checkReplicateDelete(GlobalContext, oi.Bucket, ObjectToDelete{
-			ObjectName: oi.Name,
-			VersionID:  oi.VersionID,
+			ObjectV: ObjectV{
+				ObjectName: oi.Name,
+				VersionID:  oi.VersionID,
+			},
		}, oi, ObjectOptions{}, nil)
	} else {
		dsc = mustReplicate(GlobalContext, oi.Bucket, oi.Name, getMustReplicateOptions(ObjectInfo{
@@ -573,8 +579,23 @@ type ResyncTargetsInfo struct {

// ResyncTarget is a struct representing the Target reset ID where target is identified by its Arn
type ResyncTarget struct {
-	Arn     string `json:"arn"`
-	ResetID string `json:"resetid"`
+	Arn       string    `json:"arn"`
+	ResetID   string    `json:"resetid"`
+	StartTime time.Time `json:"startTime"`
+	EndTime   time.Time `json:"endTime"`
+	// Status of resync operation
+	ResyncStatus string `json:"resyncStatus,omitempty"`
+	// Completed size in bytes
+	ReplicatedSize int64 `json:"completedReplicationSize"`
+	// Failed size in bytes
+	FailedSize int64 `json:"failedReplicationSize"`
+	// Total number of failed operations
+	FailedCount int64 `json:"failedReplicationCount"`
+	// Total number of replicated operations
+	ReplicatedCount int64 `json:"replicationCount"`
+	// Last bucket/object replicated.
+	Bucket string `json:"bucket,omitempty"`
+	Object string `json:"object,omitempty"`
}
// VersionPurgeStatusType represents status of a versioned delete or permanent delete w.r.t bucket replication
@@ -600,3 +621,106 @@ func (v VersionPurgeStatusType) Empty() bool {
func (v VersionPurgeStatusType) Pending() bool {
	return v == Pending || v == Failed
}

+type replicationResyncState struct {
+	// map of bucket to their resync status
+	statusMap map[string]BucketReplicationResyncStatus
+	sync.RWMutex
+}
+
+const (
+	replicationDir      = "replication"
+	resyncFileName      = "resync.bin"
+	resyncMetaFormat    = 1
+	resyncMetaVersionV1 = 1
+	resyncMetaVersion   = resyncMetaVersionV1
+)
+
+// ResyncStatusType status of resync operation
+type ResyncStatusType int
+
+const (
+	// NoResync - no resync in progress
+	NoResync ResyncStatusType = iota
+	// ResyncStarted - resync in progress
+	ResyncStarted
+	// ResyncCompleted - resync finished
+	ResyncCompleted
+	// ResyncFailed - resync failed
+	ResyncFailed
+)
+
+func (rt ResyncStatusType) String() string {
+	switch rt {
+	case ResyncStarted:
+		return "Ongoing"
+	case ResyncCompleted:
+		return "Completed"
+	case ResyncFailed:
+		return "Failed"
+	default:
+		return ""
+	}
+}
+
+// TargetReplicationResyncStatus status of resync of bucket for a specific target
+type TargetReplicationResyncStatus struct {
+	StartTime time.Time `json:"startTime" msg:"st"`
+	EndTime   time.Time `json:"endTime" msg:"et"`
+	// Resync ID assigned to this reset
+	ResyncID string `json:"resyncID" msg:"id"`
+	// ResyncBeforeDate - resync all objects created prior to this date
+	ResyncBeforeDate time.Time `json:"resyncBeforeDate" msg:"rdt"`
+	// Status of resync operation
+	ResyncStatus ResyncStatusType `json:"resyncStatus" msg:"rst"`
+	// Failed size in bytes
+	FailedSize int64 `json:"failedReplicationSize" msg:"fs"`
+	// Total number of failed operations
+	FailedCount int64 `json:"failedReplicationCount" msg:"frc"`
+	// Completed size in bytes
+	ReplicatedSize int64 `json:"completedReplicationSize" msg:"rs"`
+	// Total number of replicated operations
+	ReplicatedCount int64 `json:"replicationCount" msg:"rrc"`
+	// Last bucket/object replicated.
+	Bucket string `json:"-" msg:"bkt"`
+	Object string `json:"-" msg:"obj"`
+}
+
+// BucketReplicationResyncStatus captures current replication resync status
+type BucketReplicationResyncStatus struct {
+	Version int `json:"version" msg:"v"`
+	// map of remote arn to their resync status for a bucket
+	TargetsMap map[string]TargetReplicationResyncStatus `json:"resyncMap,omitempty" msg:"brs"`
+	ID         int       `json:"id" msg:"id"`
+	LastUpdate time.Time `json:"lastUpdate" msg:"lu"`
+}
+
+func newBucketResyncStatus(bucket string) BucketReplicationResyncStatus {
+	return BucketReplicationResyncStatus{
+		TargetsMap: make(map[string]TargetReplicationResyncStatus),
+		Version:    resyncMetaVersion,
+	}
+}
+
+var contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
+
+// parse size from content-range header
+func parseSizeFromContentRange(h http.Header) (sz int64, err error) {
+	cr := h.Get(xhttp.ContentRange)
+	if cr == "" {
+		return sz, fmt.Errorf("Content-Range not set")
+	}
+	parts := contentRangeRegexp.FindStringSubmatch(cr)
+	if len(parts) != 4 {
+		return sz, fmt.Errorf("invalid Content-Range header %s", cr)
+	}
+	if parts[3] == "*" {
+		return -1, nil
+	}
+	var usz uint64
+	usz, err = strconv.ParseUint(parts[3], 10, 64)
+	if err != nil {
+		return sz, err
+	}
+	return int64(usz), nil
+}
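parseSizeFromContentRange only cares about the total-length field of `Content-Range: bytes first-last/total`, returning -1 when the total is `*` (unknown per RFC 7233). A usage sketch, assuming the function above is in scope (standard library only; `xhttp.ContentRange` is the "Content-Range" header name):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := http.Header{}
	h.Set("Content-Range", "bytes 0-524287/52428800")
	sz, err := parseSizeFromContentRange(h) // function from the diff above
	fmt.Println(sz, err)                    // 52428800 <nil>

	h.Set("Content-Range", "bytes 0-524287/*")
	sz, _ = parseSizeFromContentRange(h) // total length unknown
	fmt.Println(sz)                      // -1
}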
File diff suppressed because it is too large.
@@ -9,6 +9,119 @@ import (
	"github.com/tinylib/msgp/msgp"
)

+func TestMarshalUnmarshalBucketReplicationResyncStatus(t *testing.T) {
+	v := BucketReplicationResyncStatus{}
+	bts, err := v.MarshalMsg(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	left, err := v.UnmarshalMsg(bts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(left) > 0 {
+		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+	}
+
+	left, err = msgp.Skip(bts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(left) > 0 {
+		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+	}
+}
+
+func BenchmarkMarshalMsgBucketReplicationResyncStatus(b *testing.B) {
+	v := BucketReplicationResyncStatus{}
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		v.MarshalMsg(nil)
+	}
+}
+
+func BenchmarkAppendMsgBucketReplicationResyncStatus(b *testing.B) {
+	v := BucketReplicationResyncStatus{}
+	bts := make([]byte, 0, v.Msgsize())
+	bts, _ = v.MarshalMsg(bts[0:0])
+	b.SetBytes(int64(len(bts)))
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		bts, _ = v.MarshalMsg(bts[0:0])
+	}
+}
+
+func BenchmarkUnmarshalBucketReplicationResyncStatus(b *testing.B) {
+	v := BucketReplicationResyncStatus{}
+	bts, _ := v.MarshalMsg(nil)
+	b.ReportAllocs()
+	b.SetBytes(int64(len(bts)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := v.UnmarshalMsg(bts)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func TestEncodeDecodeBucketReplicationResyncStatus(t *testing.T) {
+	v := BucketReplicationResyncStatus{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+
+	m := v.Msgsize()
+	if buf.Len() > m {
+		t.Log("WARNING: TestEncodeDecodeBucketReplicationResyncStatus Msgsize() is inaccurate")
+	}
+
+	vn := BucketReplicationResyncStatus{}
+	err := msgp.Decode(&buf, &vn)
+	if err != nil {
+		t.Error(err)
+	}
+
+	buf.Reset()
+	msgp.Encode(&buf, &v)
+	err = msgp.NewReader(&buf).Skip()
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+func BenchmarkEncodeBucketReplicationResyncStatus(b *testing.B) {
+	v := BucketReplicationResyncStatus{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+	b.SetBytes(int64(buf.Len()))
+	en := msgp.NewWriter(msgp.Nowhere)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		v.EncodeMsg(en)
+	}
+	en.Flush()
+}
+
+func BenchmarkDecodeBucketReplicationResyncStatus(b *testing.B) {
+	v := BucketReplicationResyncStatus{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+	b.SetBytes(int64(buf.Len()))
+	rd := msgp.NewEndlessReader(buf.Bytes(), b)
+	dc := msgp.NewReader(rd)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		err := v.DecodeMsg(dc)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
func TestMarshalUnmarshalReplicateDecision(t *testing.T) {
	v := ReplicateDecision{}
	bts, err := v.MarshalMsg(nil)

@@ -686,3 +799,116 @@ func BenchmarkDecodeResyncTargetsInfo(b *testing.B) {
		}
	}
}

+func TestMarshalUnmarshalTargetReplicationResyncStatus(t *testing.T) {
+	v := TargetReplicationResyncStatus{}
+	bts, err := v.MarshalMsg(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	left, err := v.UnmarshalMsg(bts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(left) > 0 {
+		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+	}
+
+	left, err = msgp.Skip(bts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(left) > 0 {
+		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+	}
+}
+
+func BenchmarkMarshalMsgTargetReplicationResyncStatus(b *testing.B) {
+	v := TargetReplicationResyncStatus{}
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		v.MarshalMsg(nil)
+	}
+}
+
+func BenchmarkAppendMsgTargetReplicationResyncStatus(b *testing.B) {
+	v := TargetReplicationResyncStatus{}
+	bts := make([]byte, 0, v.Msgsize())
+	bts, _ = v.MarshalMsg(bts[0:0])
+	b.SetBytes(int64(len(bts)))
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		bts, _ = v.MarshalMsg(bts[0:0])
+	}
+}
+
+func BenchmarkUnmarshalTargetReplicationResyncStatus(b *testing.B) {
+	v := TargetReplicationResyncStatus{}
+	bts, _ := v.MarshalMsg(nil)
+	b.ReportAllocs()
+	b.SetBytes(int64(len(bts)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := v.UnmarshalMsg(bts)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func TestEncodeDecodeTargetReplicationResyncStatus(t *testing.T) {
+	v := TargetReplicationResyncStatus{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+
+	m := v.Msgsize()
+	if buf.Len() > m {
+		t.Log("WARNING: TestEncodeDecodeTargetReplicationResyncStatus Msgsize() is inaccurate")
+	}
+
+	vn := TargetReplicationResyncStatus{}
+	err := msgp.Decode(&buf, &vn)
+	if err != nil {
+		t.Error(err)
+	}
+
+	buf.Reset()
+	msgp.Encode(&buf, &v)
+	err = msgp.NewReader(&buf).Skip()
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+func BenchmarkEncodeTargetReplicationResyncStatus(b *testing.B) {
+	v := TargetReplicationResyncStatus{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+	b.SetBytes(int64(buf.Len()))
+	en := msgp.NewWriter(msgp.Nowhere)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		v.EncodeMsg(en)
+	}
+	en.Flush()
+}
+
+func BenchmarkDecodeTargetReplicationResyncStatus(b *testing.B) {
+	v := TargetReplicationResyncStatus{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+	b.SetBytes(int64(buf.Len()))
+	rd := msgp.NewEndlessReader(buf.Bytes(), b)
+	dc := msgp.NewReader(rd)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		err := v.DecodeMsg(dc)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
@@ -32,7 +32,7 @@ var replicatedInfosTests = []struct {
	expectedOpType replication.Type
	expectedAction replicationAction
}{
-	{ //1. empty tgtInfos slice
+	{ // 1. empty tgtInfos slice
		name:                  "no replicated targets",
		tgtInfos:              []replicatedTargetInfo{},
		expectedCompletedSize: 0,
@@ -41,7 +41,7 @@ var replicatedInfosTests = []struct {
		expectedOpType: replication.UnsetReplicationType,
		expectedAction: replicateNone,
	},
-	{ //2. replication completed to single target
+	{ // 2. replication completed to single target
		name: "replication completed to single target",
		tgtInfos: []replicatedTargetInfo{
			{
@@ -59,7 +59,7 @@ var replicatedInfosTests = []struct {
		expectedOpType: replication.ObjectReplicationType,
		expectedAction: replicateAll,
	},
-	{ //3. replication completed to single target; failed to another
+	{ // 3. replication completed to single target; failed to another
		name: "replication completed to single target",
		tgtInfos: []replicatedTargetInfo{
			{
@@ -77,14 +77,15 @@ var replicatedInfosTests = []struct {
				ReplicationStatus: replication.Failed,
				OpType:            replication.ObjectReplicationType,
				ReplicationAction: replicateAll,
-			}},
+			},
+		},
		expectedCompletedSize:             249,
		expectedReplicationStatusInternal: "arn1=COMPLETED;arn2=FAILED;",
		expectedReplicationStatus:         replication.Failed,
		expectedOpType:                    replication.ObjectReplicationType,
		expectedAction:                    replicateAll,
	},
-	{ //4. replication pending on one target; failed to another
+	{ // 4. replication pending on one target; failed to another
		name: "replication completed to single target",
		tgtInfos: []replicatedTargetInfo{
			{
@@ -102,7 +103,8 @@ var replicatedInfosTests = []struct {
				ReplicationStatus: replication.Failed,
				OpType:            replication.ObjectReplicationType,
				ReplicationAction: replicateAll,
-			}},
+			},
+		},
		expectedCompletedSize:             0,
		expectedReplicationStatusInternal: "arn1=PENDING;arn2=FAILED;",
		expectedReplicationStatus:         replication.Failed,
@@ -137,7 +139,7 @@ var parseReplicationDecisionTest = []struct {
	expDsc ReplicateDecision
	expErr error
}{
-	{ //1.
+	{ // 1.
		name: "empty string",
		dsc:  "",
		expDsc: ReplicateDecision{
@@ -146,7 +148,7 @@ var parseReplicationDecisionTest = []struct {
		expErr: nil,
	},

-	{ //2.
+	{ // 2.
		name:   "replicate decision for one target",
		dsc:    "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id",
		expErr: nil,
@@ -156,7 +158,7 @@ var parseReplicationDecisionTest = []struct {
			},
		},
	},
-	{ //3.
+	{ // 3.
		name:   "replicate decision for multiple targets",
		dsc:    "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id,arn:minio:replication::id2:bucket=false;true;arn:minio:replication::id2:bucket;id2",
		expErr: nil,
@@ -167,7 +169,7 @@ var parseReplicationDecisionTest = []struct {
			},
		},
	},
-	{ //4.
+	{ // 4.
		name:   "invalid format replicate decision for one target",
		dsc:    "arn:minio:replication::id:bucket:true;false;arn:minio:replication::id:bucket;id",
		expErr: errInvalidReplicateDecisionFormat,
@@ -181,9 +183,7 @@ var parseReplicationDecisionTest = []struct {

func TestParseReplicateDecision(t *testing.T) {
	for i, test := range parseReplicationDecisionTest {
-		//dsc, err := parseReplicateDecision(test.dsc)
		dsc, err := parseReplicateDecision(test.expDsc.String())
-
		if err != nil {
			if test.expErr != err {
				t.Errorf("Test %d (%s): Expected parse error got %v, want %v", i+1, test.name, err, test.expErr)
@@ -208,22 +208,22 @@ var replicationStateTest = []struct {
	arn       string
	expStatus replication.StatusType
}{
-	{ //1. no replication status header
+	{ // 1. no replication status header
		name:      "no replicated targets",
		rs:        ReplicationState{},
		expStatus: replication.StatusType(""),
	},
-	{ //2. replication status for one target
+	{ // 2. replication status for one target
		name:      "replication status for one target",
		rs:        ReplicationState{ReplicationStatusInternal: "arn1=PENDING;", Targets: map[string]replication.StatusType{"arn1": "PENDING"}},
		expStatus: replication.Pending,
	},
-	{ //3. replication status for one target - incorrect format
+	{ // 3. replication status for one target - incorrect format
		name:      "replication status for one target",
		rs:        ReplicationState{ReplicationStatusInternal: "arn1=PENDING"},
		expStatus: replication.StatusType(""),
	},
-	{ //4. replication status for 3 targets, one of them failed
+	{ // 4. replication status for 3 targets, one of them failed
		name: "replication status for 3 targets - one failed",
		rs: ReplicationState{
			ReplicationStatusInternal: "arn1=COMPLETED;arn2=COMPLETED;arn3=FAILED;",
@@ -231,7 +231,7 @@ var replicationStateTest = []struct {
		},
		expStatus: replication.Failed,
	},
-	{ //5. replication status for replica version
+	{ // 5. replication status for replica version
		name:      "replication status for replica version",
		rs:        ReplicationState{ReplicationStatusInternal: string(replication.Replica)},
		expStatus: replication.Replica,
Some files were not shown because too many files have changed in this diff.