# Compare commits

Comparing RELEASE.20… to RELEASE.20… (the full tag names are truncated in the source). 880 commits.
The commit table itself did not survive extraction: the author, date, and message columns are empty, leaving only the abbreviated commit SHAs (892a204013 through 7b0b0f9101).
## Files changed

#### .github/ISSUE_TEMPLATE.md (5 changes, vendored)

```diff
@@ -7,6 +7,11 @@ assignees: ''

 ---

+## NOTE
+All GitHub issues are addressed on a best-effort basis at MinIO's sole discretion. There are no Service Level Agreements (SLA) or Objectives (SLO). Remember our [Code of Conduct](https://github.com/minio/minio/blob/master/code_of_conduct.md) when engaging with MinIO Engineers and the larger community.
+
+For urgent issues (e.g. production down, etc.), subscribe to [SUBNET](https://min.io/pricing?jmp=github) for direct to engineering support.
+
 <!--- Provide a general summary of the issue in the Title above -->

 ## Expected Behavior
```
#### .github/ISSUE_TEMPLATE/bug_report.md (3 changes, vendored)

```diff
@@ -7,6 +7,9 @@ assignees: ''

 ---

+## NOTE
+If this case is urgent, please subscribe to [Subnet](https://min.io/pricing) so that our 24/7 support team may help you faster.
+
 <!--- Provide a general summary of the issue in the Title above -->

 ## Expected Behavior
```
#### .github/lock.yml (39 changes, vendored, deleted)

The Probot lock-threads configuration is removed; it is replaced by the `.github/workflows/lock.yml` GitHub Action added below.

```diff
@@ -1,39 +0,0 @@
-# Configuration for Lock Threads - https://github.com/dessant/lock-threads-app
-
-# Number of days of inactivity before a closed issue or pull request is locked
-daysUntilLock: 365
-
-# Skip issues and pull requests created before a given timestamp. Timestamp must
-# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
-skipCreatedBefore: false
-
-# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
-exemptLabels: []
-
-# Label to add before locking, such as `outdated`. Set to `false` to disable
-lockLabel: true
-
-# Comment to post before locking. Set to `false` to disable
-lockComment: >-
-
-  This thread has been automatically locked since there has not been
-  any recent activity after it was closed. Please open a new issue for
-  related bugs.
-
-# Assign `resolved` as the reason for locking. Set to `false` to disable
-setLockReason: true
-
-# Limit to only `issues` or `pulls`
-only: issues
-
-# Optionally, specify configuration settings just for `issues` or `pulls`
-# issues:
-#   exemptLabels:
-#     - help-wanted
-#   lockLabel: outdated
-
-# pulls:
-#   daysUntilLock: 30
-
-# Repository to extend settings from
-# _extends: repo
```
#### .github/stale.yml (2 changes, vendored)

```diff
@@ -14,7 +14,7 @@ onlyLabels: []
 exemptLabels:
   - "security"
   - "pending discussion"
-  - "do not close"
+  - "do-not-close"

 # Set to true to ignore issues in a project (defaults to false)
 exemptProjects: false
```
#### .github/workflows/go-cross.yml (23 changes, vendored)

```diff
@@ -1,26 +1,33 @@
-name: Go
+name: Crosscompile

 on:
   pull_request:
     branches:
       - master

+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
 jobs:
   build:
-    name: MinIO crosscompile tests on ${{ matrix.go-version }} and ${{ matrix.os }}
+    name: Build Tests with Go ${{ matrix.go-version }} on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.16.x]
+        go-version: [1.17.x]
         os: [ubuntu-latest]
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-node@v1
-        with:
-          node-version: '12'
-      - uses: actions/setup-go@v2
+      - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
+      - uses: actions/setup-go@bfdd3570ce990073878bf10f6b2d79082de49492 # v2
        with:
          go-version: ${{ matrix.go-version }}
+          check-latest: true
      - name: Build on ${{ matrix.os }}
        if: matrix.os == 'ubuntu-latest'
        env:
```
#### .github/workflows/go-healing.yml (46 changes, vendored, new file)

```yaml
name: Healing Functional Tests

on:
  pull_request:
    branches:
      - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:
  build:
    name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go-version: [1.17.x]
        os: [ubuntu-latest]
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go-version }}
          check-latest: true
      - uses: actions/cache@v2
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-${{ matrix.go-version }}-go-
      - name: Build on ${{ matrix.os }}
        if: matrix.os == 'ubuntu-latest'
        env:
          CGO_ENABLED: 0
          GO111MODULE: on
          MINIO_KMS_SECRET_KEY: "my-minio-key:oyArl7zlPECEduNbB1KXgdzDn2Bdpvvw0l8VO51HQnY="
          MINIO_KMS_AUTO_ENCRYPTION: on
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          make verify-healing
```
#### .github/workflows/go-lint.yml (34 changes, vendored)

```diff
@@ -1,23 +1,48 @@
-name: Go
+name: Linters and Tests

 on:
   pull_request:
     branches:
       - master

+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
 jobs:
   build:
-    name: MinIO tests on ${{ matrix.go-version }} and ${{ matrix.os }}
+    name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.16.x]
+        go-version: [1.17.x]
         os: [ubuntu-latest, windows-latest]
     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
           go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - uses: actions/cache@v2
+        if: matrix.os == 'ubuntu-latest'
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-${{ matrix.go-version }}-go-
+      - uses: actions/cache@v2
+        if: matrix.os == 'windows-latest'
+        with:
+          path: |
+            %LocalAppData%\go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-${{ matrix.go-version }}-go-
       - name: Build on ${{ matrix.os }}
         if: matrix.os == 'windows-latest'
         env:
@@ -39,4 +64,5 @@ jobs:
           curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-${nancy_version}-linux-amd64 && chmod +x nancy
           go list -deps -json ./... | jq -s 'unique_by(.Module.Path)|.[]|select(has("Module"))|.Module' | ./nancy sleuth
-          make
+          make test
           make test-race
```
#### .github/workflows/go.yml (24 changes, vendored)

```diff
@@ -1,23 +1,38 @@
-name: Go
+name: Functional Tests

 on:
   pull_request:
     branches:
       - master

+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
 jobs:
   build:
-    name: MinIO Setup on ${{ matrix.go-version }} and ${{ matrix.os }}
+    name: Go ${{ matrix.go-version }} on ${{ matrix.os }} - healing
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.16.x]
+        go-version: [1.17.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
           go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - uses: actions/cache@v2
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-${{ matrix.go-version }}-go-
       - name: Build on ${{ matrix.os }}
         if: matrix.os == 'ubuntu-latest'
         env:
@@ -32,4 +47,3 @@
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make verify
-          make verify-healing
```
#### .github/workflows/iam-integrations.yaml (96 changes, vendored, new file)

```yaml
name: IAM integration

on:
  pull_request:
    branches:
      - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:
  iam-matrix-test:
    name: "[Go=${{ matrix.go-version }}|ldap=${{ matrix.ldap }}|etcd=${{ matrix.etcd }}|openid=${{ matrix.openid }}]"
    runs-on: ubuntu-latest

    services:
      openldap:
        image: quay.io/minio/openldap
        ports:
          - "389:389"
          - "636:636"
        env:
          LDAP_ORGANIZATION: "MinIO Inc"
          LDAP_DOMAIN: "min.io"
          LDAP_ADMIN_PASSWORD: "admin"
      etcd:
        image: "quay.io/coreos/etcd:v3.5.1"
        env:
          ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379"
          ETCD_ADVERTISE_CLIENT_URLS: "http://0.0.0.0:2379"
        ports:
          - "2379:2379"
        options: >-
          --health-cmd "etcdctl endpoint health"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
      openid:
        image: quay.io/minio/dex
        ports:
          - "5556:5556"
        env:
          DEX_LDAP_SERVER: "openldap:389"

    strategy:
      # When ldap, etcd or openid vars are empty below, those external servers
      # are turned off - i.e. if ldap="", then ldap server is not enabled for
      # the tests.
      matrix:
        go-version: [1.17.x]
        ldap: ["", "localhost:389"]
        etcd: ["", "http://localhost:2379"]
        openid: ["", "http://127.0.0.1:5556/dex"]
        exclude:
          # exclude combos where all are empty.
          - ldap: ""
            etcd: ""
            openid: ""
          # exclude combos where both ldap and openid IDPs are specified.
          - ldap: "localhost:389"
            openid: "http://127.0.0.1:5556/dex"

    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go-version }}
          check-latest: true
      - uses: actions/cache@v2
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-${{ matrix.go-version }}-go-
      - name: Test LDAP/OpenID/Etcd combo
        env:
          LDAP_TEST_SERVER: ${{ matrix.ldap }}
          ETCD_SERVER: ${{ matrix.etcd }}
          OPENID_TEST_SERVER: ${{ matrix.openid }}
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          make test-iam
      - name: Test LDAP for automatic site replication
        if: matrix.ldap == 'localhost:389'
        run: |
          make test-site-replication-ldap
      - name: Test OIDC for automatic site replication
        if: matrix.openid == 'http://127.0.0.1:5556/dex'
        run: |
          make test-site-replication-oidc
```
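Because the matrix comment above means each cell is just a set of environment variables, any one combination can be reproduced locally before pushing. A minimal sketch (server addresses taken from the workflow; leaving a variable empty disables that backend in the tests):

```sh
# Reproduce one matrix cell of the IAM integration job locally.
export LDAP_TEST_SERVER="localhost:389"      # LDAP enabled
export ETCD_SERVER="http://localhost:2379"   # etcd enabled
export OPENID_TEST_SERVER=""                 # OpenID disabled for this combo
make test-iam
```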
#### .github/workflows/lock.yml (24 changes, vendored, new file)

```yaml
name: 'Lock Threads'

on:
  schedule:
    - cron: '0 0 * * *'
  workflow_dispatch:

permissions:
  issues: write

concurrency:
  group: lock

jobs:
  action:
    runs-on: ubuntu-latest
    steps:
      - uses: dessant/lock-threads@v3
        with:
          github-token: ${{ github.token }}
          issue-inactive-days: '365'
          exclude-any-issue-labels: 'do-not-close'
          issue-lock-reason: 'resolved'
          log-output: true
```
#### .github/workflows/markdown-lint.yaml (25 changes, vendored, new file)

```yaml
name: Markdown Linter

on:
  pull_request:
    branches:
      - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:
  lint:
    name: Lint all docs
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v2

      - name: Lint all docs
        run: |
          npm install -g markdownlint-cli
          markdownlint --fix '**/*.md' --disable MD013 MD040
```
#### .github/workflows/replication.yaml (47 changes, vendored, new file)

```yaml
name: Multi-site replication tests

on:
  pull_request:
    branches:
      - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:
  replication-test:
    name: Replication Tests with Go ${{ matrix.go-version }}
    runs-on: ubuntu-latest

    strategy:
      matrix:
        go-version: [1.17.x]

    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go-version }}
          check-latest: true
      - uses: actions/cache@v2
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-${{ matrix.go-version }}-go-
      - name: Test Replication
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          make test-replication
      - name: Test MinIO IDP for automatic site replication
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          make test-site-replication-minio
```
#### .github/workflows/upgrade-ci-cd.yaml (31 changes, vendored, new file)

```yaml
name: Upgrade old version tests

on:
  pull_request:
    branches:
      - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:
  build:
    name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go-version: [1.17.x]
        os: [ubuntu-latest]

    steps:
      - uses: actions/checkout@v1
      - uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go-version }}
          check-latest: true
      - name: Start upgrade tests
        run: |
          make test-upgrade
```
#### .gitignore (13 changes, vendored)

```diff
@@ -21,4 +21,15 @@ prime/
 stage/
 .sia_temp/
 config.json
 node_modules/
+mc.*
+s3-check-md5*
+xl-meta*
+healing-*
+inspect*
+200M*
+hash-set
+minio.RELEASE*
+mc
+nancy
+inspects/*
```
#### .golangci.yml (file header not captured in the source; inferred from the golangci-lint content)

```diff
@@ -23,12 +23,28 @@ linters:
   - structcheck
   - unconvert
   - varcheck
+  - gocritic
+  - gofumpt
+
+linters-settings:
+  gofumpt:
+    lang-version: "1.17"
+
+    # Choose whether or not to use the extra rules that are disabled
+    # by default
+    extra-rules: false

 issues:
   exclude-use-default: false
   exclude:
     - should have a package comment
     - error strings should not be capitalized or end with punctuation or a newline
+    # todo fix these when we get enough time.
+    - "singleCaseSwitch: should rewrite switch statement to if statement"
+    - "unlambda: replace"
+    - "captLocal:"
+    - "ifElseChain:"
+    - "elseif:"

 service:
-  golangci-lint-version: 1.20.0 # use the fixed version to not introduce new linters unexpectedly
+  golangci-lint-version: 1.43.0 # use the fixed version to not introduce new linters unexpectedly
```
#### COMPLIANCE.md (7 changes, new file)

```markdown
# AGPLv3 Compliance

We have designed MinIO as an Open Source software for the Open Source software community. This requires applications to consider whether their usage of MinIO is in compliance with the GNU AGPLv3 [license](https://github.com/minio/minio/blob/master/LICENSE).

MinIO cannot make the determination as to whether your application's usage of MinIO is in compliance with the AGPLv3 license requirements. You should instead rely on your own legal counsel or licensing specialists to audit and ensure your application is in compliance with the licenses of MinIO and all other open-source projects with which your application integrates or interacts. We understand that AGPLv3 licensing is complex and nuanced. It is for that reason we strongly encourage using experts in licensing to make any such determinations around compliance instead of relying on apocryphal or anecdotal advice.

[MinIO Commercial Licensing](https://min.io/pricing) is the best option for applications that trigger AGPLv3 obligations (e.g. open sourcing your application). Applications using MinIO - or any other OSS-licensed code - without validating their usage do so at their own risk.
```
#### CONTRIBUTING.md (file header not captured in the source; inferred from content)

````diff
@@ -7,15 +7,17 @@
 Start by forking the MinIO GitHub repository, make changes in a branch and then send a pull request. We encourage pull requests to discuss code changes. Here are the steps in details:

 ### Setup your MinIO GitHub Repository

 Fork [MinIO upstream](https://github.com/minio/minio/fork) source repository to your own personal repository. Copy the URL of your MinIO fork (you will need it for the `git clone` command below).

 ```sh
-$ git clone https://github.com/minio/minio
-$ go install -v
-$ ls /go/bin/minio
+git clone https://github.com/minio/minio
+go install -v
+ls /go/bin/minio
 ```

 ### Set up git remote as ``upstream``

 ```sh
 $ cd minio
 $ git remote add upstream https://github.com/minio/minio
@@ -25,13 +27,15 @@ $ git merge upstream/master
 ```

 ### Create your feature branch

 Before making code changes, make sure you create a separate branch for these changes

 ```
-$ git checkout -b my-new-feature
+git checkout -b my-new-feature
 ```

 ### Test MinIO server changes

 After your code changes, make sure

 - To add test cases for the new code. If you have questions about how to do it, please ask on our [Slack](https://slack.min.io) channel.
@@ -40,29 +44,38 @@ After your code changes, make sure
 - To run `make test` and `make build` completes.

 ### Commit changes

 After verification, commit your changes. This is a [great post](https://chris.beams.io/posts/git-commit/) on how to write useful commit messages

 ```
-$ git commit -am 'Add some feature'
+git commit -am 'Add some feature'
 ```

 ### Push to the branch

 Push your locally committed changes to the remote origin (your fork)

 ```
-$ git push origin my-new-feature
+git push origin my-new-feature
 ```

 ### Create a Pull Request

 Pull requests can be created via GitHub. Refer to [this document](https://help.github.com/articles/creating-a-pull-request/) for detailed steps on how to create a pull request. After a Pull Request gets peer reviewed and approved, it will be merged.

 ## FAQs

-### How does ``MinIO`` manages dependencies?
+### How does ``MinIO`` manage dependencies?

 ``MinIO`` uses `go mod` to manage its dependencies.

 - Run `go get foo/bar` in the source folder to add the dependency to `go.mod` file.

 To remove a dependency

 - Edit your code and remove the import reference.
 - Run `go mod tidy` in the source folder to remove dependency from `go.mod` file.

 ### What are the coding guidelines for MinIO?

 ``MinIO`` is fully conformant with Golang style. Refer: [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project. If you observe offending code, please feel free to send a pull request or ping us on [Slack](https://slack.min.io).
````
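The dependency workflow in the FAQ above, end to end, looks roughly like this (a minimal sketch; the module path is a hypothetical placeholder):

```sh
# Add a dependency: import it in the code, then record it in go.mod.
go get github.com/example/foo   # hypothetical module path

# Remove a dependency: delete the import from the code first, then
# drop the now-unused requirement from go.mod.
go mod tidy
```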
#### Dockerfile (file name not captured in the source)

```diff
@@ -1,5 +1,10 @@
 FROM minio/minio:latest

+ENV PATH=/opt/bin:$PATH
+
 COPY ./minio /opt/bin/minio
 COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh

 ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

 VOLUME ["/data"]
```
#### Dockerfile (second file; name not captured in the source)

```diff
@@ -1,14 +1,9 @@
-FROM minio/minio:edge
+FROM minio/minio:latest

-LABEL maintainer="MinIO Inc <dev@min.io>"
+ENV PATH=/opt/bin:$PATH

-COPY minio /usr/bin/
-COPY dockerscripts/docker-entrypoint.sh /usr/bin/
-
-RUN chmod +x /usr/bin/minio && \
-    chmod +x /usr/bin/docker-entrypoint.sh
-
-EXPOSE 9000
+COPY ./minio /opt/bin/minio
+COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh

 ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
```
#### Dockerfile.hotfix (50 changes, new file)

```dockerfile
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.5

ARG RELEASE

LABEL name="MinIO" \
      vendor="MinIO Inc <dev@min.io>" \
      maintainer="MinIO Inc <dev@min.io>" \
      version="${RELEASE}" \
      release="${RELEASE}" \
      summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
      description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."

ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_ROOT_USER_FILE=access_key \
    MINIO_ROOT_PASSWORD_FILE=secret_key \
    MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
    MINIO_CONFIG_ENV_FILE=config.env \
    PATH=/opt/bin:$PATH

COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE

RUN \
    microdnf clean all && \
    microdnf update --nodocs && \
    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
    rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
    microdnf install minisign --nodocs && \
    mkdir -p /opt/bin && chmod -R 777 /opt/bin && \
    curl -s -q https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.${RELEASE} -o /opt/bin/minio && \
    curl -s -q https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.${RELEASE}.sha256sum -o /opt/bin/minio.sha256sum && \
    curl -s -q https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.${RELEASE}.minisig -o /opt/bin/minio.minisig && \
    microdnf clean all && \
    chmod +x /opt/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/verify-minio.sh && \
    /usr/bin/verify-minio.sh && \
    microdnf clean all

EXPOSE 9000

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio"]
```
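The `ARG RELEASE` above is supplied at build time; the `docker-hotfix` target in the Makefile below passes it via `--build-arg`. A minimal sketch of a manual build (the release string and tag here are hypothetical examples):

```sh
# Build a hotfix image by hand; RELEASE selects which hotfix binary the
# Dockerfile downloads from dl.min.io.
docker build --no-cache \
  --build-arg RELEASE="RELEASE.2021-11-24T23-19-33Z.hotfix.abc123def" \
  -t "minio/minio:RELEASE.2021-11-24T23-19-33Z.hotfix.abc123def" \
  -f Dockerfile.hotfix .
```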
#### Dockerfile (release download variant; file name not captured in the source)

```diff
@@ -1,4 +1,4 @@
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.4
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.5

 ARG TARGETARCH

@@ -18,7 +18,8 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_ROOT_PASSWORD_FILE=secret_key \
     MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
     MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
-    MINIO_CONFIG_ENV_FILE=config.env
+    MINIO_CONFIG_ENV_FILE=config.env \
+    PATH=/opt/bin:$PATH

 COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
 COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
@@ -26,18 +27,21 @@ COPY CREDITS /licenses/CREDITS
 COPY LICENSE /licenses/LICENSE

 RUN \
     microdnf clean all && \
     microdnf update --nodocs && \
-    microdnf install curl ca-certificates shadow-utils util-linux iproute iputils --nodocs && \
+    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
     rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
     microdnf install minisign --nodocs && \
-    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /usr/bin/minio && \
-    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /usr/bin/minio.sha256sum && \
-    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /usr/bin/minio.minisig && \
+    mkdir -p /opt/bin && chmod -R 777 /opt/bin && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /opt/bin/minio && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /opt/bin/minio.sha256sum && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /opt/bin/minio.minisig && \
+    microdnf clean all && \
-    chmod +x /usr/bin/minio && \
+    chmod +x /opt/bin/minio && \
     chmod +x /usr/bin/docker-entrypoint.sh && \
     chmod +x /usr/bin/verify-minio.sh && \
-    /usr/bin/verify-minio.sh
+    /usr/bin/verify-minio.sh && \
+    microdnf clean all

 EXPOSE 9000
```
#### Dockerfile (FIPS download variant; file name not captured in the source)

```diff
@@ -1,4 +1,4 @@
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.4
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.5

 ARG TARGETARCH

@@ -18,7 +18,8 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_ROOT_PASSWORD_FILE=secret_key \
     MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
     MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
-    MINIO_CONFIG_ENV_FILE=config.env
+    MINIO_CONFIG_ENV_FILE=config.env \
+    PATH=/opt/bin:$PATH

 COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
 COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
@@ -26,18 +27,21 @@ COPY CREDITS /licenses/CREDITS
 COPY LICENSE /licenses/LICENSE

 RUN \
     microdnf clean all && \
     microdnf update --nodocs && \
-    microdnf install curl ca-certificates shadow-utils util-linux iproute iputils --nodocs && \
+    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
     rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
     microdnf install minisign --nodocs && \
-    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips -o /usr/bin/minio && \
-    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.sha256sum -o /usr/bin/minio.sha256sum && \
-    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.minisig -o /usr/bin/minio.minisig && \
+    mkdir -p /opt/bin && chmod -R 777 /opt/bin && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips -o /opt/bin/minio && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.sha256sum -o /opt/bin/minio.sha256sum && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.minisig -o /opt/bin/minio.minisig && \
+    microdnf clean all && \
-    chmod +x /usr/bin/minio && \
+    chmod +x /opt/bin/minio && \
     chmod +x /usr/bin/docker-entrypoint.sh && \
     chmod +x /usr/bin/verify-minio.sh && \
-    /usr/bin/verify-minio.sh
+    /usr/bin/verify-minio.sh && \
+    microdnf clean all

 EXPOSE 9000
```
#### Dockerfile.scratch (5 changes, new file)

```dockerfile
FROM scratch

COPY minio /minio

CMD ["/minio"]
```
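A scratch image contains nothing but what is copied in, so the `minio` binary must be fully static. A minimal sketch of how one might produce and package it (the build flags mirror the Makefile's `build` target below; the image tag is illustrative):

```sh
# Static build: CGO_ENABLED=0 avoids any libc dependency, which a
# scratch image cannot satisfy.
CGO_ENABLED=0 go build -tags kqueue -trimpath -o minio

# Package the binary into the scratch image.
docker build -t minio-scratch -f Dockerfile.scratch .
```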
#### Makefile (103 changes)

```diff
@@ -10,78 +10,115 @@ TAG ?= "minio/minio:$(VERSION)"

 all: build

-checks:
+checks: ## check dependencies
 	@echo "Checking dependencies"
 	@(env bash $(PWD)/buildscripts/checkdeps.sh)

-getdeps:
-	@mkdir -p ${GOPATH}/bin
-	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.40.1
-	@which msgp 1>/dev/null || (echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.3)
-	@which stringer 1>/dev/null || (echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer)
+help: ## print this help
+	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' Makefile | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'

-crosscompile:
+getdeps: ## fetch necessary dependencies
+	@mkdir -p ${GOPATH}/bin
+	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.43.0
+	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.7-0.20211026165309-e818a1881b0e
+	@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest
+
+crosscompile: ## cross compile minio
 	@(env bash $(PWD)/buildscripts/cross-compile.sh)

 verifiers: getdeps lint check-gen

-check-gen:
+check-gen: ## check for updated autogenerated files
 	@go generate ./... >/dev/null
 	@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)

-lint:
+lint: ## runs golangci-lint suite of linters
 	@echo "Running $@ check"
 	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
-	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml
+	@${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml

 # Builds minio, runs the verifiers then runs the tests.
 check: test
-test: verifiers build
+test: verifiers build ## builds minio, runs linters, tests
 	@echo "Running unit tests"
-	@GOGC=25 GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
+	@CGO_ENABLED=0 go test -tags kqueue ./...

-test-race: verifiers build
+test-upgrade: build
+	@echo "Running minio upgrade tests"
+	@(env bash $(PWD)/buildscripts/minio-upgrade.sh)
+
+test-race: verifiers build ## builds minio, runs linters, tests (race)
 	@echo "Running unit tests under -race"
 	@(env bash $(PWD)/buildscripts/race.sh)

-# Verify minio binary
-verify:
+test-iam: build ## verify IAM (external IDP, etcd backends)
+	@echo "Running tests for IAM (external IDP, etcd backends)"
+	@CGO_ENABLED=0 go test -tags kqueue -v -run TestIAM* ./cmd
+	@echo "Running tests for IAM (external IDP, etcd backends) with -race"
+	@CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd
+
+test-replication: install ## verify multi site replication
+	@echo "Running tests for replicating three sites"
+	@(env bash $(PWD)/docs/bucket/replication/setup_3site_replication.sh)
+
+test-site-replication-ldap: install ## verify automatic site replication
+	@echo "Running tests for automatic site replication of IAM (with LDAP)"
+	@(env bash $(PWD)/docs/site-replication/run-multi-site-ldap.sh)
+
+test-site-replication-oidc: install ## verify automatic site replication
+	@echo "Running tests for automatic site replication of IAM (with OIDC)"
+	@(env bash $(PWD)/docs/site-replication/run-multi-site-oidc.sh)
+
+test-site-replication-minio: install ## verify automatic site replication
+	@echo "Running tests for automatic site replication of IAM (with MinIO IDP)"
+	@(env bash $(PWD)/docs/site-replication/run-multi-site-minio-idp.sh)
+
+verify: ## verify minio various setups
 	@echo "Verifying build with race"
-	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-build.sh)

-# Verify healing of disks with minio binary
-verify-healing:
+verify-healing: ## verify healing and replacing disks with minio binary
 	@echo "Verify healing build with race"
-	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-healing.sh)
+	@(env bash $(PWD)/buildscripts/unaligned-healing.sh)

 # Builds minio locally.
-build: checks
+build: checks ## builds minio to $(PWD)
 	@echo "Building minio binary to './minio'"
-	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null

 hotfix-vars:
 	$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
    sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#')))
-	$(eval TAG := "minio/minio:$(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD)")
-hotfix: hotfix-vars install
+	$(eval VERSION := $(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD))
+	$(eval TAG := "minio/minio:$(VERSION)")

-docker-hotfix: hotfix checks
+hotfix: hotfix-vars install ## builds minio binary with hotfix tags
+	@mv -f ./minio ./minio.$(VERSION)
+	@minisign -qQSm ./minio.$(VERSION) -s "${CRED_DIR}/minisign.key" < "${CRED_DIR}/minisign-passphrase"
+	@sha256sum < ./minio.$(VERSION) | sed 's, -,minio.$(VERSION),g' > minio.$(VERSION).sha256sum
+
+hotfix-push: hotfix
+	@scp -q -r minio.$(VERSION)* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive/
+	@scp -q -r minio.$(VERSION)* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive/
+	@echo "Published new hotfix binaries at https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.$(VERSION)"
+
+docker-hotfix-push: docker-hotfix
+	@docker push -q $(TAG) && echo "Published new container $(TAG)"
+
+docker-hotfix: hotfix-push checks ## builds minio docker container with hotfix tags
 	@echo "Building minio docker image '$(TAG)'"
-	@docker build -t $(TAG) . -f Dockerfile.dev
+	@docker build -q --no-cache -t $(TAG) --build-arg RELEASE=$(VERSION) . -f Dockerfile.hotfix

-docker: build checks
+docker: build checks ## builds minio docker container
 	@echo "Building minio docker image '$(TAG)'"
-	@docker build -t $(TAG) . -f Dockerfile.dev
+	@docker build -q --no-cache -t $(TAG) . -f Dockerfile

 # Builds minio and installs it to $GOPATH/bin.
-install: build
+install: build ## builds minio and installs it to $GOPATH/bin.
 	@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
 	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
 	@echo "Installation successful. To learn more, try \"minio --help\"."

-clean:
+clean: ## cleanup all generated assets
 	@echo "Cleaning up all the generated files"
 	@find . -name '*.test' | xargs rm -fv
 	@find . -name '*~' | xargs rm -fv
```
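The new `help` target above makes the Makefile self-documenting: any target annotated with a trailing `## comment` is picked up by its grep/awk pipeline. The same extraction can be run directly from a shell; a minimal sketch, dropping the color escapes (note that make's `$$` escapes become plain `$` here):

```sh
# List every ##-annotated target with its description, as `make help` does.
grep -E '^[a-zA-Z_-]+:.*?## .*$' Makefile \
  | awk 'BEGIN {FS = ":.*?## "}; {printf "%-30s %s\n", $1, $2}'
```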
117
README.md
117
README.md
@@ -1,13 +1,14 @@
# MinIO Quickstart Guide

[Slack](https://slack.min.io) [Docker Pulls](https://hub.docker.com/r/minio/minio/) [License](https://github.com/minio/minio/blob/master/LICENSE)

[MinIO](https://min.io)

MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.

This README provides quickstart instructions on running MinIO on baremetal hardware, including container-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).
This README provides quickstart instructions on running MinIO on bare metal hardware, including container-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).

# Container Installation
## Container Installation

Use the following commands to run a standalone MinIO server as a container.

@@ -16,34 +17,32 @@ require distributed deploying MinIO with Erasure Coding. For extended developmen
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.

## Stable
### Stable

Run the following command to run the latest stable image of MinIO as a container using an ephemeral data volume:

```sh
podman run \
  -p 9000:9000 \
  -p 9001:9001 \
  minio/minio server /data --console-address ":9001"
podman run -p 9000:9000 -p 9001:9001 \
  quay.io/minio/minio server /data --console-address ":9001"
```

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded
object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.

> NOTE: To deploy MinIO with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the container.
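
For example, a minimal sketch of the same `podman` invocation with a persistent volume attached (the host path `/mnt/data` is only an illustration):

```sh
# Map the host directory /mnt/data into the container at /data
# so that objects survive container restarts.
podman run \
  -p 9000:9000 \
  -p 9001:9001 \
  -v /mnt/data:/data \
  quay.io/minio/minio server /data --console-address ":9001"
```
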
# macOS
## macOS

Use the following commands to run a standalone MinIO server on macOS.

Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.

## Homebrew (recommended)
### Homebrew (recommended)

Run the following command to install the latest stable MinIO package using [Homebrew](https://brew.sh/). Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.

@@ -59,11 +58,11 @@ brew uninstall minio
brew install minio/stable/minio
```

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see https://docs.min.io/docs/ and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.

## Binary Download
### Binary Download

Use the following command to download and run a standalone MinIO server on macOS. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.

@@ -73,11 +72,11 @@ chmod +x minio
./minio server /data
```

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see https://docs.min.io/docs/ and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.

# GNU/Linux
## GNU/Linux

Use the following command to run a standalone MinIO server on Linux hosts running 64-bit Intel/AMD architectures. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.

@@ -93,18 +92,18 @@ The following table lists supported architectures. Replace the `wget` URL with t

| Architecture | URL |
| -------- | ------ |
| 64-bit Intel/AMD | https://dl.min.io/server/minio/release/linux-amd64/minio |
| 64-bit ARM | https://dl.min.io/server/minio/release/linux-arm64/minio |
| 64-bit PowerPC LE (ppc64le) | https://dl.min.io/server/minio/release/linux-ppc64le/minio |
| IBM Z-Series (S390X) | https://dl.min.io/server/minio/release/linux-s390x/minio |
| 64-bit Intel/AMD | <https://dl.min.io/server/minio/release/linux-amd64/minio> |
| 64-bit ARM | <https://dl.min.io/server/minio/release/linux-arm64/minio> |
| 64-bit PowerPC LE (ppc64le) | <https://dl.min.io/server/minio/release/linux-ppc64le/minio> |
| IBM Z-Series (S390X) | <https://dl.min.io/server/minio/release/linux-s390x/minio> |

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see https://docs.min.io/docs/ and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.

# Microsoft Windows
## Microsoft Windows

To run MinIO on 64-bit Windows hosts, download the MinIO executable from the following URL:

@@ -118,31 +117,31 @@ Use the following command to run a standalone MinIO server on the Windows host.
minio.exe server D:\
```

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see https://docs.min.io/docs/ and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.

# Install from Source
## Install from Source

Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.16](https://golang.org/dl/#stable)
Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.17](https://golang.org/dl/#stable)

```sh
GO111MODULE=on go install github.com/minio/minio@latest
```

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see https://docs.min.io/docs/ and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.

MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.

# Deployment Recommendations
## Deployment Recommendations

## Allow port access for Firewalls
### Allow port access for Firewalls

By default MinIO uses port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to the port.
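
A minimal sketch of opening those ports, assuming a host managed with `ufw` (substitute the equivalent rules for `firewall-cmd`, `iptables`, or whatever firewall your platform uses):

```sh
# Allow inbound traffic to the MinIO S3 API port (9000)
# and to the Console port (9001), then reload the rules.
ufw allow 9000/tcp
ufw allow 9001/tcp
ufw reload
```
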
@@ -198,18 +197,21 @@ service iptables restart
```

## Pre-existing data

When deployed on a single drive, MinIO server lets clients access any pre-existing data in the data directory. For example, if MinIO is started with the command `minio server /mnt/data`, any pre-existing data in the `/mnt/data` directory would be accessible to the clients.

The above statement is also valid for all gateway backends.
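
As a quick sketch of what that means in practice (assuming an `mc` alias named `myminio` already points at this server):

```sh
# /mnt/data already contains a directory "pictures" with files in it.
minio server /mnt/data &

# The pre-existing directory is served as a bucket,
# and its files are served as objects.
mc ls myminio/pictures/
```
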
# Test MinIO Connectivity
## Test MinIO Connectivity

## Test using MinIO Console
MinIO Server comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 to ensure your server has started successfully.
### Test using MinIO Console

MinIO Server comes with an embedded web based object browser. Point your web browser to <http://127.0.0.1:9000> to ensure your server has started successfully.

> NOTE: MinIO runs the console on a random port by default; if you wish to choose a specific port, use `--console-address` to pick a specific interface and port.

### Things to consider

MinIO redirects browser access requests made to the configured server port (i.e. `127.0.0.1:9000`) to the configured Console port. MinIO uses the hostname or IP address specified in the request when building the redirect URL. The URL and port *must* be accessible by the client for the redirection to work.

For deployments behind a load balancer, proxy, or ingress rule where the MinIO host IP address or port is not public, use the `MINIO_BROWSER_REDIRECT_URL` environment variable to specify the external hostname for the redirect. The LB/Proxy must have rules for directing traffic to the Console port specifically.
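
A minimal sketch, assuming the Console is published externally at `https://console.example.net` (a hypothetical hostname):

```sh
# Redirect browser requests to the external Console hostname
# instead of the backend IP:port the request happened to hit.
export MINIO_BROWSER_REDIRECT_URL="https://console.example.net"
minio server /data --console-address ":9001"
```
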
@@ -220,34 +222,40 @@ Similarly, if your TLS certificates do not have the IP SAN for the MinIO server

For example: `export MINIO_SERVER_URL="https://minio.example.net"`

| Dashboard | Creating a bucket |
| ------------- | ------------- |
| *(Dashboard screenshot)* | *(Bucket creation screenshot)* |

## Test using MinIO Client `mc`

`mc` provides a modern alternative to UNIX commands such as ls, cat, cp, mirror, and diff. It supports filesystems and Amazon S3 compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://docs.min.io/docs/minio-client-quickstart-guide) for further instructions.
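
For instance, a short sketch of typical `mc` usage against the local deployment (the alias name `myminio` is arbitrary):

```sh
# Register the deployment under an alias, then exercise a few
# familiar commands against it.
mc alias set myminio http://127.0.0.1:9000 minioadmin minioadmin
mc mb myminio/mybucket              # make a bucket
mc cp /etc/hosts myminio/mybucket/  # copy a file into it
mc ls myminio/mybucket              # list its contents
```
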
# Upgrading MinIO
MinIO server supports rolling upgrades, i.e. you can update one MinIO instance at a time in a distributed cluster. This allows upgrades with no downtime. Upgrades can be done manually by replacing the binary with the latest release and restarting all servers in a rolling fashion. However, we recommend all our users to use [`mc admin update`](https://docs.min.io/docs/minio-admin-complete-guide.html#update) from the client. This will update all the nodes in the cluster simultaneously and restart them, as shown in the following command from the MinIO client (mc):
## Upgrading MinIO

Upgrades require zero downtime in MinIO; all upgrades are non-disruptive and all transactions on MinIO are atomic, so upgrading all the servers simultaneously is the recommended way to upgrade MinIO.

> NOTE: updating directly from <https://dl.min.io> requires internet access; optionally you can host any mirrors at <https://my-artifactory.example.com/minio/>

- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://docs.min.io/minio/baremetal/reference/minio-mc-admin/mc-admin-update.html)

```sh
mc admin update <minio alias, e.g., myminio>
```

> NOTE: some releases might not allow rolling upgrades; this is always called out in the release notes, so it is generally advised to read the release notes before upgrading. In such a situation `mc admin update` is the recommended upgrading mechanism to upgrade all servers at once.
- For deployments without external internet access (e.g. airgapped environments), download the binary from <https://dl.min.io> and replace the existing MinIO binary, say at `/opt/bin/minio`; apply executable permissions with `chmod +x /opt/bin/minio` and run `mc admin service restart alias/`, as sketched below.
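
A sketch of that manual procedure (the path `/opt/bin/minio` follows the example above; adjust it for your layout):

```sh
# Fetch the latest binary on a machine with internet access,
# copy it over the existing one, then restart via mc.
curl -O https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x ./minio
mv -f ./minio /opt/bin/minio
mc admin service restart alias/
```
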
## Important things to remember during MinIO upgrades

- For RPM/DEB installations, upgrade packages **in parallel** on all servers. Once upgraded, perform `systemctl restart minio` across all nodes in **parallel**. RPM/DEB based installations are usually automated using [`ansible`](https://github.com/minio/ansible-minio).

- `mc admin update` will only work if the user running MinIO has write access to the parent directory where the binary is located; for example, if the current binary is at `/usr/local/bin/minio`, you need write access to `/usr/local/bin`.
- `mc admin update` updates and restarts all servers simultaneously; applications will retry and continue their respective operations once the upgrade completes.
- `mc admin update` is disabled in kubernetes/container environments; container environments provide their own mechanisms for rolling out updates.
- In the case of federated setups `mc admin update` should be run against each cluster individually. Avoid updating `mc` to any new releases until all clusters have been successfully updated.
- If using `kes` as KMS with MinIO, just replace the binary and restart `kes`; more information about `kes` can be found [here](https://github.com/minio/kes/wiki)
- If using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: https://www.vaultproject.io/docs/upgrading/index.html
- If using etcd with MinIO for the federation, ensure you have followed the etcd upgrade procedure outlined here: https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md
### Upgrade Checklist

- Test all upgrades in a lower environment (DEV, QA, UAT) before applying to production. Performing blind upgrades in production environments carries significant risk.
- Read the release notes for the targeted MinIO release *before* performing any installation; there is no forced requirement to upgrade to the latest release every week. If a release has a bug fix you are looking for, upgrade; otherwise avoid actively upgrading a running production system.
- Make sure the MinIO process has write access to `/opt/bin` if you plan to use `mc admin update`. This is needed for MinIO to download the latest binary from <https://dl.min.io> and save it locally for upgrades.
- `mc admin update` is not supported in kubernetes/container environments; container environments provide their own mechanisms for container updates.
- **We do not recommend upgrading one MinIO server at a time; the product is designed to support parallel upgrades. Please follow our recommended guidelines.**

## Explore Further

# Explore Further
- [MinIO Erasure Code QuickStart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
- [Use `mc` with MinIO Server](https://docs.min.io/docs/minio-client-quickstart-guide)
- [Use `aws-cli` with MinIO Server](https://docs.min.io/docs/aws-cli-with-minio)
@@ -255,9 +263,12 @@ mc admin update <minio alias, e.g., myminio>
- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/docs/golang-client-quickstart-guide)
- [The MinIO documentation website](https://docs.min.io)

# Contribute to MinIO Project
## Contribute to MinIO Project

Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)

# License
MinIO source is licensed under the GNU AGPLv3 license that can be found in the [LICENSE](https://github.com/minio/minio/blob/master/LICENSE) file.
MinIO [Documentation](https://github.com/minio/minio/tree/master/docs) © 2021 by MinIO, Inc is licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/).
## License

- MinIO source is licensed under the GNU AGPLv3 license that can be found in the [LICENSE](https://github.com/minio/minio/blob/master/LICENSE) file.
- MinIO [Documentation](https://github.com/minio/minio/tree/master/docs) © 2021 by MinIO, Inc is licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/).
- [License Compliance](https://github.com/minio/minio/blob/master/COMPLIANCE.md)
@@ -18,9 +18,10 @@ you need access credentials for a successful exploit).

If you have not received a reply to your email within 48 hours or you have not heard from the security team
for the past five days, please contact the security team directly:
- Primary security coordinator: aead@min.io
- Secondary coordinator: harsha@min.io
- If you receive no response: dev@min.io

- Primary security coordinator: aead@min.io
- Secondary coordinator: harsha@min.io
- If you receive no response: dev@min.io

### Disclosure Process

@@ -32,7 +33,7 @@ MinIO uses the following disclosure process:
If the report is rejected the response explains why.
3. Code is audited to find any potential similar problems.
4. Fixes are prepared for the latest release.
5. On the date that the fixes are applied a security advisory will be published on https://blog.min.io.
5. On the date that the fixes are applied a security advisory will be published on <https://blog.min.io>.
Please inform us in your report email whether MinIO should mention your contribution w.r.t. fixing
the security issue. By default MinIO will **not** publish this information to protect your privacy.

@@ -1,11 +1,11 @@
## Vulnerability Management Policy
# Vulnerability Management Policy

This document formally describes the process of addressing and managing a
reported vulnerability that has been found in the MinIO server code base,
any directly connected ecosystem component or a direct / indirect dependency
of the code base.

### Scope
## Scope

The vulnerability management policy described in this document covers the
process of investigating, assessing and resolving a vulnerability report
@@ -14,13 +14,13 @@ opened by a MinIO employee or an external third party.
Therefore, it lists pre-conditions and actions that should be performed to
resolve and fix a reported vulnerability.

### Vulnerability Management Process
## Vulnerability Management Process

The vulnerability management process requires that the vulnerability report
contains the following information:

- The project / component that contains the reported vulnerability.
- A description of the vulnerability. In particular, the type of the
- The project / component that contains the reported vulnerability.
- A description of the vulnerability. In particular, the type of the
reported vulnerability and how it might be exploited. Alternatively,
a well-established vulnerability identifier, e.g. CVE number, can be
used instead.
@@ -28,12 +28,11 @@ contains the following information:
Based on the description mentioned above, a MinIO engineer or security team
member investigates:

- Whether the reported vulnerability exists.
- The conditions that are required such that the vulnerability can be exploited.
- The steps required to fix the vulnerability.
- Whether the reported vulnerability exists.
- The conditions that are required such that the vulnerability can be exploited.
- The steps required to fix the vulnerability.

In general, if the vulnerability exists in one of the MinIO code bases
itself - not in a code dependency - then MinIO will, if possible, fix
the vulnerability or implement reasonable countermeasures such that the
vulnerability cannot be exploited anymore.

@@ -1,18 +0,0 @@
**/*.swp
cover.out
*~
minio
!*/
site/
**/*.test
**/*.sublime-workspace
/.idea/
/Minio.iml
**/access.log
build
vendor/**/*.js
vendor/**/*.json
.DS_Store
*.syso
coverage.txt
node_modules
@@ -1,3 +1,4 @@
//go:build ignore
// +build ignore

// Copyright (c) 2015-2021 MinIO, Inc.

92
buildscripts/minio-upgrade.sh
Normal file
@@ -0,0 +1,92 @@
#!/bin/bash

trap 'cleanup $LINENO' ERR

# shellcheck disable=SC2120
cleanup() {
    MINIO_VERSION=dev docker-compose \
        -f "buildscripts/upgrade-tests/compose.yml" \
        rm -s -f
    docker volume prune -f
}

verify_checksum_after_heal() {
    local sum1
    sum1=$(curl -s "$2" | sha256sum);
    mc admin heal --json -r "$1" >/dev/null; # test after healing
    local sum1_heal
    sum1_heal=$(curl -s "$2" | sha256sum);

    if [ "${sum1_heal}" != "${sum1}" ]; then
        echo "mismatch - expected ${sum1}, got ${sum1_heal}"
        exit 1;
    fi
}

verify_checksum_mc() {
    local expected
    expected=$(mc cat "$1" | sha256sum)
    local got
    got=$(mc cat "$2" | sha256sum)

    if [ "${expected}" != "${got}" ]; then
        echo "mismatch - expected ${expected}, got ${got}"
        exit 1;
    fi
    echo "matches - ${expected}, got ${got}"
}

add_alias() {
    for i in $(seq 1 4); do
        echo "... attempting to add alias $i"
        until (mc alias set minio http://127.0.0.1:9000 minioadmin minioadmin); do
            echo "...waiting... for 5secs" && sleep 5
        done
    done

    echo "Sleeping for nginx"
    sleep 20
}

__init__() {
    sudo apt install curl -y
    export GOPATH=/tmp/gopath
    export PATH=${PATH}:${GOPATH}/bin

    go install github.com/minio/mc@latest

    TAG=minio/minio:dev make docker

    MINIO_VERSION=RELEASE.2019-12-19T22-52-26Z docker-compose \
        -f "buildscripts/upgrade-tests/compose.yml" \
        up -d --build

    add_alias

    mc mb minio/minio-test/
    mc cp ./minio minio/minio-test/to-read/
    mc cp /etc/hosts minio/minio-test/to-read/hosts
    mc policy set download minio/minio-test

    verify_checksum_mc ./minio minio/minio-test/to-read/minio

    curl -s http://127.0.0.1:9000/minio-test/to-read/hosts | sha256sum

    MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
}

main() {
    MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build

    add_alias

    verify_checksum_after_heal minio/minio-test http://127.0.0.1:9000/minio-test/to-read/hosts

    verify_checksum_mc ./minio minio/minio-test/to-read/minio

    verify_checksum_mc /etc/hosts minio/minio-test/to-read/hosts

    cleanup
}

( __init__ "$@" && main "$@" )

@@ -2,6 +2,7 @@

set -e

for d in $(go list ./... | grep -v browser); do
    CGO_ENABLED=1 go test -v -tags kqueue -race --timeout 100m "$d"
## TODO remove `dsync` from race detector once this is merged and released https://go-review.googlesource.com/c/go/+/333529/
for d in $(go list ./... | grep -v dsync); do
    CGO_ENABLED=1 go test -v -race --timeout 100m "$d"
done

172
buildscripts/unaligned-healing.sh
Executable file
@@ -0,0 +1,172 @@
#!/bin/bash -e
#

set -E
set -o pipefail
set -x

if [ ! -x "$PWD/minio" ]; then
    echo "minio executable binary not found in current directory"
    exit 1
fi

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO_OLD=( "$PWD/minio.RELEASE.2021-11-24T23-19-33Z" --config-dir "$MINIO_CONFIG_DIR" server )
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )

function download_old_release() {
    if [ ! -f minio.RELEASE.2021-11-24T23-19-33Z ]; then
        curl --silent -O https://dl.minio.io/server/minio/release/linux-amd64/archive/minio.RELEASE.2021-11-24T23-19-33Z
        chmod a+x minio.RELEASE.2021-11-24T23-19-33Z
    fi
}

function start_minio_16drive() {
    start_port=$1

    export MINIO_ROOT_USER=minio
    export MINIO_ROOT_PASSWORD=minio123
    export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
    unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
    export _MINIO_SHARD_DISKTIME_DELTA="5s" # do not change this as it's needed for tests
    export MINIO_CI_CD=1

    MC_BUILD_DIR="mc-$RANDOM"
    if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
        echo "failed to download https://github.com/minio/mc"
        purge "${MC_BUILD_DIR}"
        exit 1
    fi

    (cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")

    # remove mc source.
    purge "${MC_BUILD_DIR}"

    "${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" > "${WORK_DIR}/server1.log" 2>&1 &
    pid=$!
    disown $pid
    sleep 30

    if ! ps -p ${pid} 1>&2 >/dev/null; then
        echo "server1 log:"
        cat "${WORK_DIR}/server1.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    shred --iterations=1 --size=5241856 - 1>"${WORK_DIR}/unaligned" 2>/dev/null
    "${WORK_DIR}/mc" mb minio/healing-shard-bucket --quiet
    "${WORK_DIR}/mc" cp \
        "${WORK_DIR}/unaligned" \
        minio/healing-shard-bucket/unaligned \
        --disable-multipart --quiet

    ## "unaligned" object name gets consistently distributed
    ## to disks in following distribution order
    ##
    ## NOTE: if you change the name make sure to change the
    ## distribution order present here
    ##
    ## [15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]

    ## make sure to remove the "last" data shard
    rm -rf "${WORK_DIR}/xl14/healing-shard-bucket/unaligned"
    sleep 10
    ## Heal the shard
    "${WORK_DIR}/mc" admin heal --quiet --recursive minio/healing-shard-bucket
    ## then remove any other data shard let's pick first disk
    ## - 1st data shard.
    rm -rf "${WORK_DIR}/xl3/healing-shard-bucket/unaligned"
    sleep 10

    go build ./docs/debugging/s3-check-md5/
    if ! ./s3-check-md5 \
        -debug \
        -access-key minio \
        -secret-key minio123 \
        -endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep CORRUPTED; then
        echo "server1 log:"
        cat "${WORK_DIR}/server1.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    pkill minio
    sleep 3

    "${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" > "${WORK_DIR}/server1.log" 2>&1 &
    pid=$!
    disown $pid
    sleep 30

    if ! ps -p ${pid} 1>&2 >/dev/null; then
        echo "server1 log:"
        cat "${WORK_DIR}/server1.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    if ! ./s3-check-md5 \
        -debug \
        -access-key minio \
        -secret-key minio123 \
        -endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
        echo "server1 log:"
        cat "${WORK_DIR}/server1.log"
        echo "FAILED"
        mkdir -p inspects
        (cd inspects; "${WORK_DIR}/mc" admin inspect minio/healing-shard-bucket/unaligned/**)

        "${WORK_DIR}/mc" mb play/inspects
        "${WORK_DIR}/mc" mirror inspects play/inspects

        purge "$WORK_DIR"
        exit 1
    fi

    "${WORK_DIR}/mc" admin heal --quiet --recursive minio/healing-shard-bucket

    if ! ./s3-check-md5 \
        -debug \
        -access-key minio \
        -secret-key minio123 \
        -endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
        echo "server1 log:"
        cat "${WORK_DIR}/server1.log"
        echo "FAILED"
        mkdir -p inspects
        (cd inspects; "${WORK_DIR}/mc" admin inspect minio/healing-shard-bucket/unaligned/**)

        "${WORK_DIR}/mc" mb play/inspects
        "${WORK_DIR}/mc" mirror inspects play/inspects

        purge "$WORK_DIR"
        exit 1
    fi

    pkill minio
    sleep 3
}

function main() {
    download_old_release

    start_port=$(shuf -i 10000-65000 -n 1)

    start_minio_16drive ${start_port}
}

function purge()
{
    rm -rf "$1"
}

( main "$@" )
rv=$?
purge "$WORK_DIR"
exit "$rv"

81
buildscripts/upgrade-tests/compose.yml
Normal file
@@ -0,0 +1,81 @@
version: '3.7'

# Settings and configurations that are common for all containers
x-minio-common: &minio-common
  image: minio/minio:${MINIO_VERSION}
  command: server http://minio{1...4}/data{1...3}
  env_file:
    - ./minio.env
  expose:
    - "9000"
    - "9001"
  healthcheck:
    test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
    interval: 30s
    timeout: 20s
    retries: 3

# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
  minio1:
    <<: *minio-common
    hostname: minio1
    volumes:
      - data1-1:/data1
      - data1-2:/data2
      - data1-3:/data3

  minio2:
    <<: *minio-common
    hostname: minio2
    volumes:
      - data2-1:/data1
      - data2-2:/data2
      - data2-3:/data3

  minio3:
    <<: *minio-common
    hostname: minio3
    volumes:
      - data3-1:/data1
      - data3-2:/data2
      - data3-3:/data3

  minio4:
    <<: *minio-common
    hostname: minio4
    volumes:
      - data4-1:/data1
      - data4-2:/data2
      - data4-3:/data3

  nginx:
    image: nginx:1.19.2-alpine
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf:ro
    ports:
      - "9000:9000"
      - "9001:9001"
    depends_on:
      - minio1
      - minio2
      - minio3
      - minio4

## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
  data1-1:
  data1-2:
  data1-3:
  data2-1:
  data2-2:
  data2-3:
  data3-1:
  data3-2:
  data3-3:
  data4-1:
  data4-2:
  data4-3:
3
buildscripts/upgrade-tests/minio.env
Normal file
@@ -0,0 +1,3 @@
MINIO_ACCESS_KEY=minioadmin
MINIO_SECRET_KEY=minioadmin
MINIO_BROWSER=off
68
buildscripts/upgrade-tests/nginx.conf
Normal file
@@ -0,0 +1,68 @@
user nginx;
worker_processes auto;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;


events {
    worker_connections 1024;
}


http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    #tcp_nopush on;

    keepalive_timeout 65;

    #gzip on;

    # include /etc/nginx/conf.d/*.conf;

    upstream minio {
        server minio1:9000;
        server minio2:9000;
        server minio3:9000;
        server minio4:9000;
    }

    # main minio
    server {
        listen 9000;
        listen [::]:9000;
        server_name localhost;

        # To allow special characters in headers
        ignore_invalid_headers off;
        # Allow any size file to be uploaded.
        # Set to a value such as 1000m; to restrict file size to a specific value
        client_max_body_size 0;
        # To disable buffering
        proxy_buffering off;

        location / {
            proxy_set_header Host $http_host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;

            proxy_connect_timeout 300;
            # Default is HTTP/1, keepalive is only enabled in HTTP/1.1
            proxy_http_version 1.1;
            proxy_set_header Connection "";
            chunked_transfer_encoding off;

            proxy_pass http://minio;
        }
    }
}
@@ -20,6 +20,9 @@ export SECRET_KEY="minio123"
export ENABLE_HTTPS=0
export GO111MODULE=on
export GOGC=25
export ENABLE_ADMIN=1

export MINIO_CI_CD=1

MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" )
@@ -65,7 +68,7 @@ function start_minio_pool_erasure_sets_ipv6()
{
    export MINIO_ROOT_USER=$ACCESS_KEY
    export MINIO_ROOT_PASSWORD=$SECRET_KEY
    export MINIO_ENDPOINTS="http://[::1]:9000${WORK_DIR}/pool-disk-sets{1...4} http://[::1]:9001${WORK_DIR}/pool-disk-sets{5...8}"
    export MINIO_ENDPOINTS="http://[::1]:9000${WORK_DIR}/pool-disk-sets-ipv6{1...4} http://[::1]:9001${WORK_DIR}/pool-disk-sets-ipv6{5...8}"
    "${MINIO[@]}" server --address="[::1]:9000" > "$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
    "${MINIO[@]}" server --address="[::1]:9001" > "$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &


@@ -1,7 +1,6 @@
#!/bin/bash
#!/bin/bash -e
#

set -e
set -E
set -o pipefail

@@ -14,61 +13,79 @@ WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )

export GOGC=25

function start_minio_3_node() {
    export MINIO_ROOT_USER=minio
    export MINIO_ROOT_PASSWORD=minio123
    export MINIO_ERASURE_SET_DRIVE_COUNT=6
    export MINIO_CI_CD=1

    start_port=$(shuf -i 10000-65000 -n 1)
    start_port=$2
    args=""
    for i in $(seq 1 3); do
        args="$args http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/"
        args="$args http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/"
    done

    "${MINIO[@]}" --address ":$[$start_port+1]" $args > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
    disown $!
    pid1=$!
    disown ${pid1}

    "${MINIO[@]}" --address ":$[$start_port+2]" $args > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
    disown $!
    pid2=$!
    disown $pid2

    "${MINIO[@]}" --address ":$[$start_port+3]" $args > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
    disown $!
    pid3=$!
    disown $pid3

    sleep "$1"
    if [ "$(pgrep -c minio)" -ne 3 ]; then
        for i in $(seq 1 3); do
            echo "server$i log:"
            cat "${WORK_DIR}/dist-minio-server$i.log"
        done
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1

    if ! ps -p $pid1 1>&2 > /dev/null; then
        echo "server1 log:"
        cat "${WORK_DIR}/dist-minio-server1.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    if ! ps -p $pid2 1>&2 > /dev/null; then
        echo "server2 log:"
        cat "${WORK_DIR}/dist-minio-server2.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    if ! ps -p $pid3 1>&2 > /dev/null; then
        echo "server3 log:"
        cat "${WORK_DIR}/dist-minio-server3.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    if ! pkill minio; then
        for i in $(seq 1 3); do
            echo "server$i log:"
            cat "${WORK_DIR}/dist-minio-server$i.log"
        done
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
        for i in $(seq 1 3); do
            echo "server$i log:"
            cat "${WORK_DIR}/dist-minio-server$i.log"
        done
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    sleep 1;
    if pgrep minio; then
        # forcibly killing, to proceed further properly.
        if ! pkill -9 minio; then
            echo "no minio process running anymore, proceed."
        fi
        # forcibly killing, to proceed further properly.
        if ! pkill -9 minio; then
            echo "no minio process running anymore, proceed."
        fi
    fi
}


function check_online() {
    if grep -q 'Unable to initialize sub-systems' ${WORK_DIR}/dist-minio-*.log; then
        echo "1"
        echo "1"
    fi
}

@@ -88,33 +105,36 @@ function __init__()
}

function perform_test() {
    start_minio_3_node 60
    start_minio_3_node 120 $2

    echo "Testing Distributed Erasure setup healing of drives"
    echo "Remove the contents of the disks belonging to '${1}' erasure set"

    rm -rf ${WORK_DIR}/${1}/*/

    start_minio_3_node 60
    start_minio_3_node 120 $2

    rv=$(check_online)
    if [ "$rv" == "1" ]; then
        pkill -9 minio
        for i in $(seq 1 3); do
            echo "server$i log:"
            cat "${WORK_DIR}/dist-minio-server$i.log"
        done
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
        for i in $(seq 1 3); do
            echo "server$i log:"
            cat "${WORK_DIR}/dist-minio-server$i.log"
        done
        pkill -9 minio
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi
}

function main()
{
    perform_test "2"
    perform_test "1"
    perform_test "3"
    # use same ports for all tests
    start_port=$(shuf -i 10000-65000 -n 1)

    perform_test "2" ${start_port}
    perform_test "1" ${start_port}
    perform_test "3" ${start_port}
}

( __init__ "$@" && main "$@" )

@@ -114,8 +114,6 @@ func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.
        writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
        return
    }

    w.(http.Flusher).Flush()
}

// GetBucketACLHandler - GET Bucket ACL
@@ -164,8 +162,6 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    w.(http.Flusher).Flush()
}

// PutObjectACLHandler - PUT Object ACL
@@ -229,8 +225,6 @@ func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.
        writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
        return
    }

    w.(http.Flusher).Flush()
}

// GetObjectACLHandler - GET Object ACL
@@ -283,6 +277,4 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    w.(http.Flusher).Flush()
}

@@ -65,12 +65,33 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
        return
    }

    if _, err = parseBucketQuota(bucket, data); err != nil {
    quotaConfig, err := parseBucketQuota(bucket, data)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    if err = globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, data); err != nil {
    if quotaConfig.Type == "fifo" {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
        return
    }

    if err = globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, data); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    bucketMeta := madmin.SRBucketMeta{
        Type:   madmin.SRBucketMetaTypeQuotaConfig,
        Bucket: bucket,
        Quota:  data,
    }
    if quotaConfig.Quota == 0 {
        bucketMeta.Quota = nil
    }

    // Call site replication hook.
    if err = globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
@@ -85,7 +106,7 @@ func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *

    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
@@ -99,7 +120,7 @@ func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *
        return
    }

    config, err := globalBucketMetadataSys.GetQuotaConfig(bucket)
    config, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket)
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
@@ -122,7 +143,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
    vars := mux.Vars(r)
    bucket := pathClean(vars["bucket"])
    update := r.URL.Query().Get("update") == "true"
    update := r.Form.Get("update") == "true"

    if !globalIsErasure {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
@@ -130,7 +151,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
    }

    // Get current object layer instance.
    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.SetBucketTargetAction)
    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketTargetAction)
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
@@ -155,7 +176,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
        return
    }
    var target madmin.BucketTarget
    var json = jsoniter.ConfigCompatibleWithStandardLibrary
    json := jsoniter.ConfigCompatibleWithStandardLibrary
    if err = json.Unmarshal(reqBytes, &target); err != nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
        return
@@ -169,7 +190,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
    target.SourceBucket = bucket
    var ops []madmin.TargetUpdateType
    if update {
        ops = madmin.GetTargetUpdateOps(r.URL.Query())
        ops = madmin.GetTargetUpdateOps(r.Form)
    } else {
        target.Arn = globalBucketTargetSys.getRemoteARN(bucket, &target)
    }
@@ -230,7 +251,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
        return
    }
    if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
    if err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
@@ -258,7 +279,7 @@ func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *htt
        return
    }
    // Get current object layer instance.
    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketTargetAction)
    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.GetBucketTargetAction)
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
@@ -298,7 +319,7 @@ func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *ht
        return
    }
    // Get current object layer instance.
    objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.SetBucketTargetAction)
    objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketTargetAction)
    if objectAPI == nil {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
        return
@@ -324,7 +345,7 @@ func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *ht
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
        return
    }
    if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
    if err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

219
cmd/admin-handler-utils.go
Normal file
@@ -0,0 +1,219 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"errors"
	"net/http"

	"github.com/minio/kes"
	"github.com/minio/madmin-go"
	"github.com/minio/minio/internal/auth"
	"github.com/minio/minio/internal/config"
	iampolicy "github.com/minio/pkg/iam/policy"
)

func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, actions ...iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
	// Get current object layer instance.
	objectAPI := newObjectLayerFn()
	if objectAPI == nil || globalNotificationSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return nil, auth.Credentials{}
	}

	for _, action := range actions {
		// Validate request signature.
		cred, adminAPIErr := checkAdminRequestAuth(ctx, r, action, "")
		if adminAPIErr != ErrNone {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
			return nil, cred
		}
		return objectAPI, cred
	}
	writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
	return nil, auth.Credentials{}
}

// AdminError - is a generic error for all admin APIs.
type AdminError struct {
	Code       string
	Message    string
	StatusCode int
}

func (ae AdminError) Error() string {
	return ae.Message
}

func toAdminAPIErr(ctx context.Context, err error) APIError {
	if err == nil {
		return noError
	}

	var apiErr APIError
	switch e := err.(type) {
	case iampolicy.Error:
		apiErr = APIError{
			Code:           "XMinioMalformedIAMPolicy",
			Description:    e.Error(),
			HTTPStatusCode: http.StatusBadRequest,
		}
	case config.Error:
		apiErr = APIError{
			Code:           "XMinioConfigError",
			Description:    e.Error(),
			HTTPStatusCode: http.StatusBadRequest,
		}
	case AdminError:
		apiErr = APIError{
			Code:           e.Code,
			Description:    e.Message,
			HTTPStatusCode: e.StatusCode,
		}
	case SRError:
		apiErr = errorCodes.ToAPIErrWithErr(e.Code, e.Cause)
	case decomError:
		apiErr = APIError{
			Code:           "XMinioDecommissionNotAllowed",
			Description:    e.Err,
			HTTPStatusCode: http.StatusBadRequest,
		}
	default:
		switch {
		case errors.Is(err, errDecommissionAlreadyRunning):
			apiErr = APIError{
				Code:           "XMinioDecommissionNotAllowed",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case errors.Is(err, errDecommissionComplete):
			apiErr = APIError{
				Code:           "XMinioDecommissionNotAllowed",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case errors.Is(err, errConfigNotFound):
			apiErr = APIError{
				Code:           "XMinioConfigError",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusNotFound,
			}
		case errors.Is(err, errIAMActionNotAllowed):
			apiErr = APIError{
				Code:           "XMinioIAMActionNotAllowed",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusForbidden,
			}
		case errors.Is(err, errIAMServiceAccount):
			apiErr = APIError{
				Code:           "XMinioIAMServiceAccount",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case errors.Is(err, errIAMServiceAccountUsed):
			apiErr = APIError{
				Code:           "XMinioIAMServiceAccountUsed",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case errors.Is(err, errIAMNotInitialized):
			apiErr = APIError{
				Code:           "XMinioIAMNotInitialized",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusServiceUnavailable,
			}
		case errors.Is(err, errPolicyInUse):
			apiErr = APIError{
				Code:           "XMinioAdminPolicyInUse",
				Description:    "The policy cannot be removed, as it is in use",
				HTTPStatusCode: http.StatusBadRequest,
			}
		case errors.Is(err, kes.ErrKeyExists):
			apiErr = APIError{
				Code:           "XMinioKMSKeyExists",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusConflict,
			}

		// Tier admin API errors
		case errors.Is(err, madmin.ErrTierNameEmpty):
			apiErr = APIError{
				Code:           "XMinioAdminTierNameEmpty",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case errors.Is(err, madmin.ErrTierInvalidConfig):
			apiErr = APIError{
				Code:           "XMinioAdminTierInvalidConfig",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case errors.Is(err, madmin.ErrTierInvalidConfigVersion):
			apiErr = APIError{
				Code:           "XMinioAdminTierInvalidConfigVersion",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case errors.Is(err, madmin.ErrTierTypeUnsupported):
			apiErr = APIError{
				Code:           "XMinioAdminTierTypeUnsupported",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case errors.Is(err, errTierBackendInUse):
			apiErr = APIError{
				Code:           "XMinioAdminTierBackendInUse",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusConflict,
			}
		case errors.Is(err, errTierBackendNotEmpty):
			apiErr = APIError{
				Code:           "XMinioAdminTierBackendNotEmpty",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case errors.Is(err, errTierInsufficientCreds):
			apiErr = APIError{
				Code:           "XMinioAdminTierInsufficientCreds",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case errIsTierPermError(err):
			apiErr = APIError{
				Code:           "XMinioAdminTierInsufficientPermissions",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		default:
			apiErr = errorCodes.ToAPIErrWithErr(toAdminAPIErrCode(ctx, err), err)
		}
	}
	return apiErr
}

// toAdminAPIErrCode - converts errErasureWriteQuorum error to admin API
// specific error.
func toAdminAPIErrCode(ctx context.Context, err error) APIErrorCode {
	switch err {
	case errErasureWriteQuorum:
		return ErrAdminConfigNoQuorum
	default:
		return toAPIErrorCode(ctx, err)
	}
}
@@ -28,7 +28,6 @@ import (

	"github.com/gorilla/mux"
	"github.com/minio/madmin-go"
-	"github.com/minio/minio/internal/auth"
	"github.com/minio/minio/internal/config"
	"github.com/minio/minio/internal/config/cache"
	"github.com/minio/minio/internal/config/etcd"
@@ -40,31 +39,13 @@ import (
	iampolicy "github.com/minio/pkg/iam/policy"
)

-func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) (auth.Credentials, ObjectLayer) {
-	// Get current object layer instance.
-	objectAPI := newObjectLayerFn()
-	if objectAPI == nil {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
-		return auth.Credentials{}, nil
-	}
-
-	// Validate request signature.
-	cred, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
-	if adminAPIErr != ErrNone {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
-		return cred, nil
-	}
-
-	return cred, objectAPI
-}
-
// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DeleteConfigKV")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

-	cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
	if objectAPI == nil {
		return
	}
@@ -83,6 +64,12 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
		return
	}

+	subSys, _, _, err := config.GetSubSys(string(kvBytes))
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
	cfg, err := readServerConfig(ctx, objectAPI)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -93,11 +80,32 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
+	if err = validateConfig(cfg, subSys); err != nil {
+		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
+		return
+	}

	if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

+	dynamic := config.SubSystemsDynamic.Contains(subSys)
+	if dynamic {
+		applyDynamic(ctx, objectAPI, cfg, subSys, r, w)
+	}
}

+func applyDynamic(ctx context.Context, objectAPI ObjectLayer, cfg config.Config, subSys string,
+	r *http.Request, w http.ResponseWriter) {
+	// Apply dynamic values.
+	if err := applyDynamicConfigForSubSys(GlobalContext, objectAPI, cfg, subSys); err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+	globalNotificationSys.SignalService(serviceReloadDynamic)
+	// Tell the client that dynamic config was applied.
+	w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
+}
+
// SetConfigKVHandler - PUT /minio/admin/v3/set-config-kv
@@ -106,7 +114,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

-	cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
	if objectAPI == nil {
		return
	}
@@ -137,7 +145,13 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
		return
	}

-	if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
+	subSys, _, _, err := config.GetSubSys(string(kvBytes))
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	if err = validateConfig(cfg, subSys); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}
@@ -155,14 +169,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
	}

	if dynamic {
-		// Apply dynamic values.
-		if err := applyDynamicConfig(GlobalContext, objectAPI, cfg); err != nil {
-			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
-			return
-		}
-		globalNotificationSys.SignalService(serviceReloadDynamic)
-		// If all values were dynamic, tell the client.
-		w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
+		applyDynamic(ctx, objectAPI, cfg, subSys, r, w)
	}
	writeSuccessResponseHeadersOnly(w)
}
@@ -173,14 +180,14 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

-	cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
	if objectAPI == nil {
		return
	}

	cfg := globalServerConfig.Clone()
	vars := mux.Vars(r)
-	var buf = &bytes.Buffer{}
+	buf := &bytes.Buffer{}
	cw := config.NewConfigWriteTo(cfg, vars["key"])
	if _, err := cw.WriteTo(buf); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -202,7 +209,7 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

-	_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
	if objectAPI == nil {
		return
	}
@@ -225,11 +232,9 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *
			return
		}
	}
-	} else {
-		if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil {
-			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
-			return
-		}
+	} else if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
	}
}

@@ -239,7 +244,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

-	_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
	if objectAPI == nil {
		return
	}
@@ -268,7 +273,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
		return
	}

-	if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
+	if err = validateConfig(cfg, ""); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}
@@ -287,7 +292,7 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

-	cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
	if objectAPI == nil {
		return
	}
@@ -327,7 +332,7 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

-	_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
	if objectAPI == nil {
		return
	}
@@ -337,7 +342,7 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req
	subSys := vars["subSys"]
	key := vars["key"]

-	_, envOnly := r.URL.Query()["env"]
+	_, envOnly := r.Form["env"]

	rd, err := GetHelp(subSys, key, envOnly)
	if err != nil {
@@ -346,7 +351,6 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req
	}

	json.NewEncoder(w).Encode(rd)
-	w.(http.Flusher).Flush()
}

// SetConfigHandler - PUT /minio/admin/v3/config
@@ -355,7 +359,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

-	cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
	if objectAPI == nil {
		return
	}
@@ -380,7 +384,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
		return
	}

-	if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
+	if err = validateConfig(cfg, ""); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}
@@ -407,7 +411,7 @@ func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Reques

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

-	cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
	if objectAPI == nil {
		return
	}
@@ -437,6 +441,8 @@ func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Reques
			off = !openid.Enabled(kv)
		case config.IdentityLDAPSubSys:
			off = !xldap.Enabled(kv)
+		case config.IdentityTLSSubSys:
+			off = !globalSTSTLSConfig.Enabled
		}
		if off {
			s.WriteString(config.KvComment)
179	cmd/admin-handlers-pools.go	Normal file
@@ -0,0 +1,179 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"encoding/json"
	"net/http"

	"github.com/gorilla/mux"
	"github.com/minio/minio/internal/logger"
	iampolicy "github.com/minio/pkg/iam/policy"
)

func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "StartDecommission")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DecommissionAdminAction)
	if objectAPI == nil {
		return
	}

	// Legacy args style such as non-ellipses style is not supported with this API.
	if globalEndpoints.Legacy() {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	pools, ok := objectAPI.(*erasureServerPools)
	if !ok {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	vars := mux.Vars(r)
	v := vars["pool"]

	idx := globalEndpoints.GetPoolIdx(v)
	if idx == -1 {
		// We didn't find any matching pools, invalid input
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
		return
	}

	if err := pools.Decommission(r.Context(), idx); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "CancelDecommission")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DecommissionAdminAction)
	if objectAPI == nil {
		return
	}

	// Legacy args style such as non-ellipses style is not supported with this API.
	if globalEndpoints.Legacy() {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	pools, ok := objectAPI.(*erasureServerPools)
	if !ok {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	vars := mux.Vars(r)
	v := vars["pool"]

	idx := globalEndpoints.GetPoolIdx(v)
	if idx == -1 {
		// We didn't find any matching pools, invalid input
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
		return
	}

	if err := pools.DecommissionCancel(ctx, idx); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "StatusPool")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerInfoAdminAction, iampolicy.DecommissionAdminAction)
	if objectAPI == nil {
		return
	}

	// Legacy args style such as non-ellipses style is not supported with this API.
	if globalEndpoints.Legacy() {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	pools, ok := objectAPI.(*erasureServerPools)
	if !ok {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	vars := mux.Vars(r)
	v := vars["pool"]

	idx := globalEndpoints.GetPoolIdx(v)
	if idx == -1 {
		// We didn't find any matching pools, invalid input
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
		return
	}

	status, err := pools.Status(r.Context(), idx)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	logger.LogIf(r.Context(), json.NewEncoder(w).Encode(&status))
}

func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListPools")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerInfoAdminAction, iampolicy.DecommissionAdminAction)
	if objectAPI == nil {
		return
	}

	// Legacy args style such as non-ellipses style is not supported with this API.
	if globalEndpoints.Legacy() {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	pools, ok := objectAPI.(*erasureServerPools)
	if !ok {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	poolsStatus := make([]PoolStatus, len(globalEndpoints))
	for idx := range globalEndpoints {
		status, err := pools.Status(r.Context(), idx)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		poolsStatus[idx] = status
	}

	logger.LogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus))
}
492	cmd/admin-handlers-site-replication.go	Normal file
@@ -0,0 +1,492 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"bytes"
	"context"
	"encoding/json"
	"io"
	"io/ioutil"
	"net/http"

	"github.com/gorilla/mux"
	"github.com/minio/madmin-go"

	"github.com/minio/minio/internal/logger"
	"github.com/minio/pkg/bucket/policy"
	iampolicy "github.com/minio/pkg/iam/policy"
)

// SiteReplicationAdd - PUT /minio/admin/v3/site-replication/add
func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationAdd")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
	if objectAPI == nil {
		return
	}

	var sites []madmin.PeerSite
	if err := parseJSONBody(ctx, r.Body, &sites, cred.SecretKey); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	body, err := json.Marshal(status)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, body)
}

// SRPeerJoin - PUT /minio/admin/v3/site-replication/join
//
// used internally to tell current cluster to enable SR with
// the provided peer clusters and service account.
func (a adminAPIHandlers) SRPeerJoin(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerJoin")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
	if objectAPI == nil {
		return
	}

	var joinArg madmin.SRPeerJoinReq
	if err := parseJSONBody(ctx, r.Body, &joinArg, cred.SecretKey); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err := globalSiteReplicationSys.PeerJoinReq(ctx, joinArg); err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

// SRPeerBucketOps - PUT /minio/admin/v3/site-replication/bucket-ops?bucket=x&operation=y
func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerBucketOps")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationOperationAction)
	if objectAPI == nil {
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	operation := madmin.BktOp(vars["operation"])

	var err error
	switch operation {
	default:
		err = errSRInvalidRequest(errInvalidArgument)
	case madmin.MakeWithVersioningBktOp:
		_, isLockEnabled := r.Form["lockEnabled"]
		_, isVersioningEnabled := r.Form["versioningEnabled"]
		opts := BucketOptions{
			Location:          r.Form.Get("location"),
			LockEnabled:       isLockEnabled,
			VersioningEnabled: isVersioningEnabled,
		}
		err = globalSiteReplicationSys.PeerBucketMakeWithVersioningHandler(ctx, bucket, opts)
	case madmin.ConfigureReplBktOp:
		err = globalSiteReplicationSys.PeerBucketConfigureReplHandler(ctx, bucket)
	case madmin.DeleteBucketBktOp:
		err = globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, false)
	case madmin.ForceDeleteBucketBktOp:
		err = globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, true)
	}
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

// SRPeerReplicateIAMItem - PUT /minio/admin/v3/site-replication/iam-item
func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerReplicateIAMItem")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationOperationAction)
	if objectAPI == nil {
		return
	}

	var item madmin.SRIAMItem
	if err := parseJSONBody(ctx, r.Body, &item, ""); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	var err error
	switch item.Type {
	default:
		err = errSRInvalidRequest(errInvalidArgument)
	case madmin.SRIAMItemPolicy:
		if item.Policy == nil {
			err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, nil)
		} else {
			policy, perr := iampolicy.ParseConfig(bytes.NewReader(item.Policy))
			if perr != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, perr), r.URL)
				return
			}
			if policy.IsEmpty() {
				err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, nil)
			} else {
				err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, policy)
			}
		}
	case madmin.SRIAMItemSvcAcc:
		err = globalSiteReplicationSys.PeerSvcAccChangeHandler(ctx, item.SvcAccChange)
	case madmin.SRIAMItemPolicyMapping:
		err = globalSiteReplicationSys.PeerPolicyMappingHandler(ctx, item.PolicyMapping)
	case madmin.SRIAMItemSTSAcc:
		err = globalSiteReplicationSys.PeerSTSAccHandler(ctx, item.STSCredential)
	case madmin.SRIAMItemIAMUser:
		err = globalSiteReplicationSys.PeerIAMUserChangeHandler(ctx, item.IAMUser)
	case madmin.SRIAMItemGroupInfo:
		err = globalSiteReplicationSys.PeerGroupInfoChangeHandler(ctx, item.GroupInfo)
	}
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

// SRPeerReplicateBucketItem - PUT /minio/admin/v3/site-replication/bucket-meta
func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerReplicateBucketItem")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationOperationAction)
	if objectAPI == nil {
		return
	}

	var item madmin.SRBucketMeta
	if err := parseJSONBody(ctx, r.Body, &item, ""); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	var err error
	switch item.Type {
	default:
		err = errSRInvalidRequest(errInvalidArgument)
	case madmin.SRBucketMetaTypePolicy:
		if item.Policy == nil {
			err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, nil)
		} else {
			bktPolicy, berr := policy.ParseConfig(bytes.NewReader(item.Policy), item.Bucket)
			if berr != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, berr), r.URL)
				return
			}
			if bktPolicy.IsEmpty() {
				err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, nil)
			} else {
				err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, bktPolicy)
			}
		}
	case madmin.SRBucketMetaTypeQuotaConfig:
		if item.Quota == nil {
			err = globalSiteReplicationSys.PeerBucketQuotaConfigHandler(ctx, item.Bucket, nil)
		} else {
			quotaConfig, err := parseBucketQuota(item.Bucket, item.Quota)
			if err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
				return
			}
			if err = globalSiteReplicationSys.PeerBucketQuotaConfigHandler(ctx, item.Bucket, quotaConfig); err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
		}
	case madmin.SRBucketMetaTypeTags:
		err = globalSiteReplicationSys.PeerBucketTaggingHandler(ctx, item.Bucket, item.Tags)
	case madmin.SRBucketMetaTypeObjectLockConfig:
		err = globalSiteReplicationSys.PeerBucketObjectLockConfigHandler(ctx, item.Bucket, item.ObjectLockConfig)
	case madmin.SRBucketMetaTypeSSEConfig:
		err = globalSiteReplicationSys.PeerBucketSSEConfigHandler(ctx, item.Bucket, item.SSEConfig)
	}
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

// SiteReplicationInfo - GET /minio/admin/v3/site-replication/info
func (a adminAPIHandlers) SiteReplicationInfo(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationInfo")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationInfoAction)
	if objectAPI == nil {
		return
	}

	info, err := globalSiteReplicationSys.GetClusterInfo(ctx)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err = json.NewEncoder(w).Encode(info); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationGetIDPSettings")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
	if objectAPI == nil {
		return
	}

	idpSettings := globalSiteReplicationSys.GetIDPSettings(ctx)
	if err := json.NewEncoder(w).Encode(idpSettings); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) error {
	data, err := ioutil.ReadAll(body)
	if err != nil {
		return SRError{
			Cause: err,
			Code:  ErrSiteReplicationInvalidRequest,
		}
	}
	if encryptionKey != "" {
		data, err = madmin.DecryptData(encryptionKey, bytes.NewReader(data))
		if err != nil {
			logger.LogIf(ctx, err)
			return SRError{
				Cause: err,
				Code:  ErrSiteReplicationInvalidRequest,
			}
		}
	}
	return json.Unmarshal(data, v)
}

// SiteReplicationStatus - GET /minio/admin/v3/site-replication/status
func (a adminAPIHandlers) SiteReplicationStatus(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationStatus")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationInfoAction)
	if objectAPI == nil {
		return
	}
	opts := getSRStatusOptions(r)
	// default options to all if status options are unset for backward compatibility
	var dfltOpts madmin.SRStatusOptions
	if opts == dfltOpts {
		opts.Buckets = true
		opts.Users = true
		opts.Policies = true
		opts.Groups = true
	}
	info, err := globalSiteReplicationSys.SiteReplicationStatus(ctx, objectAPI, opts)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err = json.NewEncoder(w).Encode(info); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

// SiteReplicationMetaInfo - GET /minio/admin/v3/site-replication/metainfo
func (a adminAPIHandlers) SiteReplicationMetaInfo(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationMetaInfo")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationInfoAction)
	if objectAPI == nil {
		return
	}

	opts := getSRStatusOptions(r)
	info, err := globalSiteReplicationSys.SiteReplicationMetaInfo(ctx, objectAPI, opts)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err = json.NewEncoder(w).Encode(info); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

// SiteReplicationEdit - PUT /minio/admin/v3/site-replication/edit
func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationEdit")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
	if objectAPI == nil {
		return
	}
	var site madmin.PeerInfo
	err := parseJSONBody(ctx, r.Body, &site, cred.SecretKey)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	body, err := json.Marshal(status)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, body)
}

// SRPeerEdit - PUT /minio/admin/v3/site-replication/peer/edit
//
// used internally to tell current cluster to update endpoint for peer
func (a adminAPIHandlers) SRPeerEdit(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerEdit")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
	if objectAPI == nil {
		return
	}

	var pi madmin.PeerInfo
	if err := parseJSONBody(ctx, r.Body, &pi, ""); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err := globalSiteReplicationSys.PeerEditReq(ctx, pi); err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

func getSRStatusOptions(r *http.Request) (opts madmin.SRStatusOptions) {
	q := r.Form
	opts.Buckets = q.Get("buckets") == "true"
	opts.Policies = q.Get("policies") == "true"
	opts.Groups = q.Get("groups") == "true"
	opts.Users = q.Get("users") == "true"
	opts.Entity = madmin.GetSREntityType(q.Get("entity"))
	opts.EntityValue = q.Get("entityvalue")
	return
}

// SiteReplicationRemove - PUT /minio/admin/v3/site-replication/remove
func (a adminAPIHandlers) SiteReplicationRemove(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationRemove")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationRemoveAction)
	if objectAPI == nil {
		return
	}
	var rreq madmin.SRRemoveReq
	err := parseJSONBody(ctx, r.Body, &rreq, "")
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	status, err := globalSiteReplicationSys.RemovePeerCluster(ctx, objectAPI, rreq)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	body, err := json.Marshal(status)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	writeSuccessResponseJSON(w, body)
}

// SRPeerRemove - PUT /minio/admin/v3/site-replication/peer/remove
//
// used internally to tell current cluster to update endpoint for peer
func (a adminAPIHandlers) SRPeerRemove(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerRemove")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationRemoveAction)
	if objectAPI == nil {
		return
	}

	var req madmin.SRRemoveReq
	if err := parseJSONBody(ctx, r.Body, &req, ""); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err := globalSiteReplicationSys.InternalRemoveReq(ctx, objectAPI, req); err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}
141	cmd/admin-handlers-users-race_test.go	Normal file
@@ -0,0 +1,141 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

//go:build !race
// +build !race

// Tests in this file are not run under the `-race` flag as they are too slow
// and cause context deadline errors.

package cmd

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/minio/madmin-go"
	minio "github.com/minio/minio-go/v7"
)

func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) {
	suite.SetUpSuite(c)
	suite.TestDeleteUserRace(c)
	suite.TearDownSuite(c)
}

func TestIAMInternalIDPConcurrencyServerSuite(t *testing.T) {
	baseTestCases := []TestSuiteCommon{
		// Init and run test on FS backend with signature v4.
		{serverType: "FS", signer: signerV4},
		// Init and run test on FS backend, with tls enabled.
		{serverType: "FS", signer: signerV4, secure: true},
		// Init and run test on Erasure backend.
		{serverType: "Erasure", signer: signerV4},
		// Init and run test on ErasureSet backend.
		{serverType: "ErasureSet", signer: signerV4},
	}
	testCases := []*TestSuiteIAM{}
	for _, bt := range baseTestCases {
		testCases = append(testCases,
			newTestSuiteIAM(bt, false),
			newTestSuiteIAM(bt, true),
		)
	}
	for i, testCase := range testCases {
		etcdStr := ""
		if testCase.withEtcdBackend {
			etcdStr = " (with etcd backend)"
		}
		t.Run(
			fmt.Sprintf("Test: %d, ServerType: %s%s", i+1, testCase.serverType, etcdStr),
			func(t *testing.T) {
				runAllIAMConcurrencyTests(testCase, &check{t, testCase.serverType})
			},
		)
	}
}

func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	bucket := getRandomBucketName()
	err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
	if err != nil {
		c.Fatalf("bucket creat error: %v", err)
	}

	// Create a policy policy
	policy := "mypolicy"
	policyBytes := []byte(fmt.Sprintf(`{
 "Version": "2012-10-17",
 "Statement": [
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject",
    "s3:ListBucket"
   ],
   "Resource": [
    "arn:aws:s3:::%s/*"
   ]
  }
 ]
}`, bucket))
	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
	if err != nil {
		c.Fatalf("policy add error: %v", err)
	}

	userCount := 50
	accessKeys := make([]string, userCount)
	secretKeys := make([]string, userCount)
	for i := 0; i < userCount; i++ {
		accessKey, secretKey := mustGenerateCredentials(c)
		err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
		if err != nil {
			c.Fatalf("Unable to set user: %v", err)
		}

		err = s.adm.SetPolicy(ctx, policy, accessKey, false)
		if err != nil {
			c.Fatalf("Unable to set policy: %v", err)
		}

		accessKeys[i] = accessKey
		secretKeys[i] = secretKey
	}

	wg := sync.WaitGroup{}
	for i := 0; i < userCount; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			uClient := s.getUserClient(c, accessKeys[i], secretKeys[i], "")
			err := s.adm.RemoveUser(ctx, accessKeys[i])
			if err != nil {
				c.Fatalf("unable to remove user: %v", err)
			}
			c.mustNotListObjects(ctx, uClient, bucket)
		}(i)
	}
	wg.Wait()
}
File diff suppressed because it is too large
1176	cmd/admin-handlers-users_test.go	Normal file
File diff suppressed because it is too large
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
// Copyright (c) 2015-2022 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
@@ -18,10 +18,10 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
crand "crypto/rand"
|
||||
"crypto/subtle"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -37,15 +37,13 @@ import (
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
"github.com/dustin/go-humanize"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/klauspost/compress/zip"
|
||||
"github.com/minio/kes"
|
||||
"github.com/minio/madmin-go"
|
||||
"github.com/minio/minio/internal/auth"
|
||||
"github.com/minio/minio/internal/config"
|
||||
"github.com/minio/minio/internal/dsync"
|
||||
"github.com/minio/minio/internal/handlers"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
@@ -153,9 +151,13 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
|
||||
|
||||
for _, nerr := range globalNotificationSys.ServerUpdate(ctx, u, sha256Sum, lrTime, releaseInfo) {
|
||||
if nerr.Err != nil {
|
||||
err := AdminError{
|
||||
Code: AdminUpdateApplyFailure,
|
||||
Message: nerr.Err.Error(),
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
}
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
err = fmt.Errorf("Server update failed, please do not restart the servers yet: failed with %w", nerr.Err)
|
||||
logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -163,7 +165,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
|
||||
|
||||
updateStatus, err := updateServer(u, sha256Sum, lrTime, releaseInfo, mode)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Server update failed, please do not restart the servers yet: failed with %w", err)
|
||||
logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -190,7 +192,11 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
|
||||
|
||||
// ServiceHandler - POST /minio/admin/v3/service?action={action}
|
||||
// ----------
|
||||
// restarts/stops minio server gracefully. In a distributed setup,
|
||||
// Supports following actions:
|
||||
// - restart (restarts all the MinIO instances in a setup)
|
||||
// - stop (stops all the MinIO instances in a setup)
|
||||
// - freeze (freezes all incoming S3 API calls)
|
||||
// - unfreeze (unfreezes previously frozen S3 API calls)
|
||||
func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "Service")
|
||||
|
||||
@@ -205,6 +211,10 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request)
|
||||
serviceSig = serviceRestart
|
||||
case madmin.ServiceActionStop:
|
||||
serviceSig = serviceStop
|
||||
case madmin.ServiceActionFreeze:
|
||||
serviceSig = serviceFreeze
|
||||
case madmin.ServiceActionUnfreeze:
|
||||
serviceSig = serviceUnFreeze
|
||||
default:
|
||||
logger.LogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.Application)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
|
||||
@@ -217,6 +227,8 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request)
|
||||
objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceRestartAdminAction)
|
||||
case serviceStop:
|
||||
objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceStopAdminAction)
|
||||
case serviceFreeze, serviceUnFreeze:
|
||||
objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceFreezeAdminAction)
|
||||
}
|
||||
if objectAPI == nil {
|
||||
return
|
||||
@@ -233,7 +245,14 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request)
|
||||
// Reply to the client before restarting, stopping MinIO server.
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
|
||||
globalServiceSignalCh <- serviceSig
|
||||
switch serviceSig {
|
||||
case serviceFreeze:
|
||||
freezeServices()
|
||||
case serviceUnFreeze:
|
||||
unfreezeServices()
|
||||
case serviceRestart, serviceStop:
|
||||
globalServiceSignalCh <- serviceSig
|
||||
}
|
||||
}
|
||||
|
||||
// ServerProperties holds some server information such as, version, region
|
||||
@@ -266,6 +285,7 @@ type ServerHTTPAPIStats struct {
|
||||
// including their average execution time.
|
||||
type ServerHTTPStats struct {
|
||||
S3RequestsInQueue int32 `json:"s3RequestsInQueue"`
|
||||
S3RequestsIncoming uint64 `json:"s3RequestsIncoming"`
|
||||
CurrentS3Requests ServerHTTPAPIStats `json:"currentS3Requests"`
|
||||
TotalS3Requests ServerHTTPAPIStats `json:"totalS3Requests"`
|
||||
TotalS3Errors ServerHTTPAPIStats `json:"totalS3Errors"`
|
||||
@@ -316,7 +336,6 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
|
||||
// Reply with storage information (across nodes in a
|
||||
// distributed setup) as json.
|
||||
writeSuccessResponseJSON(w, jsonBytes)
|
||||
|
||||
}
|
||||
|
||||
// DataUsageInfoHandler - GET /minio/admin/v3/datausage
|
||||
@@ -373,10 +392,10 @@ func topLockEntries(peerLocks []*PeerLocks, stale bool) madmin.LockEntries {
|
||||
}
|
||||
for k, v := range peerLock.Locks {
|
||||
for _, lockReqInfo := range v {
|
||||
if val, ok := entryMap[lockReqInfo.UID]; ok {
|
||||
if val, ok := entryMap[lockReqInfo.Name]; ok {
|
||||
val.ServerList = append(val.ServerList, peerLock.Addr)
|
||||
} else {
|
||||
entryMap[lockReqInfo.UID] = lriToLockEntry(lockReqInfo, k, peerLock.Addr)
|
||||
entryMap[lockReqInfo.Name] = lriToLockEntry(lockReqInfo, k, peerLock.Addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -450,7 +469,7 @@ func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request
|
||||
}
|
||||
|
||||
count := 10 // by default list only top 10 entries
|
||||
if countStr := r.URL.Query().Get("count"); countStr != "" {
|
||||
if countStr := r.Form.Get("count"); countStr != "" {
|
||||
var err error
|
||||
count, err = strconv.Atoi(countStr)
|
||||
if err != nil {
|
||||
@@ -458,7 +477,7 @@ func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request
|
||||
return
|
||||
}
|
||||
}
|
||||
stale := r.URL.Query().Get("stale") == "true" // list also stale locks
|
||||
stale := r.Form.Get("stale") == "true" // list also stale locks
|
||||
|
||||
peerLocks := globalNotificationSys.GetLocks(ctx, r)
|
||||
|
||||
@@ -713,7 +732,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
hip, errCode := extractHealInitParams(mux.Vars(r), r.URL.Query(), r.Body)
|
||||
hip, errCode := extractHealInitParams(mux.Vars(r), r.Form, r.Body)
|
||||
if errCode != ErrNone {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
|
||||
return
|
||||
@@ -756,13 +775,17 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(200)
|
||||
}
|
||||
// Send whitespace and keep connection open
|
||||
w.Write([]byte(" "))
|
||||
if _, err := w.Write([]byte(" ")); err != nil {
|
||||
return
|
||||
}
|
||||
w.(http.Flusher).Flush()
|
||||
case hr := <-respCh:
|
||||
switch hr.apiErr {
|
||||
case noError:
|
||||
if started {
|
||||
w.Write(hr.respBytes)
|
||||
if _, err := w.Write(hr.respBytes); err != nil {
|
||||
return
|
||||
}
|
||||
w.(http.Flusher).Flush()
|
||||
} else {
|
||||
writeSuccessResponseJSON(w, hr.respBytes)
|
||||
@@ -787,7 +810,9 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set(xhttp.ContentType, string(mimeJSON))
|
||||
w.WriteHeader(hr.apiErr.HTTPStatusCode)
|
||||
}
|
||||
w.Write(errorRespJSON)
|
||||
if _, err := w.Write(errorRespJSON); err != nil {
|
||||
return
|
||||
}
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
break forLoop
|
||||
@@ -911,12 +936,65 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
|
||||
}
|
||||
}
|
||||
|
||||
func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "SpeedtestHandler")
|
||||
// NetperfHandler - perform mesh style network throughput test
|
||||
func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "NetperfHandler")
|
||||
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealAdminAction)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !globalIsDistErasure {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
nsLock := objectAPI.NewNSLock(minioMetaBucket, "netperf")
|
||||
lkctx, err := nsLock.GetLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(toAPIErrorCode(ctx, err)), r.URL)
|
||||
return
|
||||
}
|
||||
defer nsLock.Unlock(lkctx.Cancel)
|
||||
|
||||
durationStr := r.Form.Get(peerRESTDuration)
|
||||
duration, err := time.ParseDuration(durationStr)
|
||||
if err != nil {
|
||||
duration = globalNetPerfMinDuration
|
||||
}
|
||||
|
||||
if duration < globalNetPerfMinDuration {
|
||||
// We need sample size of minimum 10 secs.
|
||||
duration = globalNetPerfMinDuration
|
||||
}
|
||||
|
||||
duration = duration.Round(time.Second)
|
||||
|
||||
results := globalNotificationSys.Netperf(ctx, duration)
|
||||
enc := json.NewEncoder(w)
|
||||
if err := enc.Encode(madmin.NetperfResult{NodeResults: results}); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// SpeedtestHandler - Deprecated. See ObjectSpeedtestHandler
func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Request) {
a.ObjectSpeedtestHandler(w, r)
}

// ObjectSpeedtestHandler - reports maximum speed of a cluster by performing PUT and
// GET operations on the server, supports auto tuning by default by automatically
// increasing concurrency and stopping when we have reached the limits on the
// system.
func (a adminAPIHandlers) ObjectSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ObjectSpeedtestHandler")

defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
if objectAPI == nil {
return
}
@@ -926,9 +1004,11 @@ func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Reques
return
}

sizeStr := r.URL.Query().Get(peerRESTSize)
durationStr := r.URL.Query().Get(peerRESTDuration)
concurrentStr := r.URL.Query().Get(peerRESTConcurrent)
sizeStr := r.Form.Get(peerRESTSize)
durationStr := r.Form.Get(peerRESTDuration)
concurrentStr := r.Form.Get(peerRESTConcurrent)
autotune := r.Form.Get("autotune") == "true"
storageClass := r.Form.Get("storage-class")

size, err := strconv.Atoi(sizeStr)
if err != nil {
@@ -940,52 +1020,170 @@ func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Reques
concurrent = 32
}

if runtime.GOMAXPROCS(0) < concurrent {
concurrent = runtime.GOMAXPROCS(0)
}

duration, err := time.ParseDuration(durationStr)
if err != nil {
duration = time.Second * 10
}

results := globalNotificationSys.Speedtest(ctx, size, concurrent, duration)
// ignores any errors here.
storageInfo, _ := objectAPI.StorageInfo(ctx)
capacityNeeded := uint64(concurrent * size)
capacity := uint64(GetTotalUsableCapacityFree(storageInfo.Disks, storageInfo))

if err := json.NewEncoder(w).Encode(results); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
if capacity < capacityNeeded {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, AdminError{
Code: "XMinioSpeedtestInsufficientCapacity",
Message: fmt.Sprintf("not enough usable space available to perform speedtest - expected %s, got %s",
humanize.IBytes(capacityNeeded), humanize.IBytes(capacity)),
StatusCode: http.StatusInsufficientStorage,
}), r.URL)
return
}

objectAPI.DeleteBucket(ctx, pathJoin(minioMetaSpeedTestBucket, minioMetaSpeedTestBucketPrefix), true)

w.(http.Flusher).Flush()
}

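A worked example, with assumed values, of the capacity arithmetic introduced in this function: the current run needs concurrent*size bytes of usable space, and the next autotune step grows concurrency by half, so its footprint is pre-computed to decide whether autotuning must be switched off.

package main

import "fmt"

func main() {
	concurrent, size := 32, 64<<20 // 32 workers, 64 MiB objects (assumed)
	capacityNeeded := uint64(concurrent * size)
	newConcurrent := concurrent + (concurrent+1)/2 // 48 after one autotune step
	autoTunedCapacityNeeded := uint64(newConcurrent * size)
	fmt.Println(capacityNeeded, autoTunedCapacityNeeded) // 2 GiB vs 3 GiB, in bytes
}
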
func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, action iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
var cred auth.Credentials
var adminAPIErr APIErrorCode
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return nil, cred
// Verify if we can employ autotune without running out of capacity,
// if we do run out of capacity, make sure to turn-off autotuning
// in such situations.
newConcurrent := concurrent + (concurrent+1)/2
autoTunedCapacityNeeded := uint64(newConcurrent * size)
if autotune && capacity < autoTunedCapacityNeeded {
// Turn-off auto-tuning if next possible concurrency would reach beyond disk capacity.
autotune = false
}

// Validate request signature.
cred, adminAPIErr = checkAdminRequestAuth(ctx, r, action, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return nil, cred
deleteBucket := func() {
objectAPI.DeleteBucket(context.Background(), pathJoin(minioMetaBucket, "speedtest"), DeleteBucketOptions{
Force: true,
NoRecreate: true,
})
}
defer deleteBucket()

// Freeze all incoming S3 API calls before running speedtest.
globalNotificationSys.ServiceFreeze(ctx, true)

// unfreeze all incoming S3 API calls after speedtest.
defer globalNotificationSys.ServiceFreeze(ctx, false)

keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()

enc := json.NewEncoder(w)
ch := speedTest(ctx, speedTestOpts{size, concurrent, duration, autotune, storageClass})
for {
select {
case <-ctx.Done():
return
case <-keepAliveTicker.C:
// Write a blank entry to prevent client from disconnecting
if err := enc.Encode(madmin.SpeedTestResult{}); err != nil {
return
}
w.(http.Flusher).Flush()
case result, ok := <-ch:
if !ok {
return
}
if err := enc.Encode(result); err != nil {
return
}
w.(http.Flusher).Flush()
}
}
}

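A hypothetical client-side sketch for the streaming responses above: the server interleaves empty keep-alive documents with real results, so a reader decodes in a loop and skips zero-value entries. Result stands in for madmin.SpeedTestResult.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type Result struct {
	Version string `json:"version"`
}

func main() {
	stream := `{} {} {"version":"1"} {}` // blanks are keep-alives
	dec := json.NewDecoder(strings.NewReader(stream))
	for dec.More() {
		var r Result
		if err := dec.Decode(&r); err != nil {
			break
		}
		if r == (Result{}) {
			continue // keep-alive entry, ignore
		}
		fmt.Println("result:", r.Version)
	}
}
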
// NetSpeedtestHandler - reports maximum network throughput
func (a adminAPIHandlers) NetSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "NetSpeedtestHandler")

writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
}

// DriveSpeedtestHandler - reports throughput of drives available in the cluster
func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DriveSpeedtestHandler")

defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
if objectAPI == nil {
return
}

return objectAPI, cred
}
if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

// AdminError - is a generic error for all admin APIs.
type AdminError struct {
Code string
Message string
StatusCode int
}
// Freeze all incoming S3 API calls before running speedtest.
globalNotificationSys.ServiceFreeze(ctx, true)

func (ae AdminError) Error() string {
return ae.Message
// unfreeze all incoming S3 API calls after speedtest.
defer globalNotificationSys.ServiceFreeze(ctx, false)

serial := r.Form.Get("serial") == "true"
blockSizeStr := r.Form.Get("blocksize")
fileSizeStr := r.Form.Get("filesize")

blockSize, err := strconv.ParseUint(blockSizeStr, 10, 64)
if err != nil {
blockSize = 4 * humanize.MiByte // default value
}

fileSize, err := strconv.ParseUint(fileSizeStr, 10, 64)
if err != nil {
fileSize = 1 * humanize.GiByte // default value
}

opts := madmin.DriveSpeedTestOpts{
Serial: serial,
BlockSize: blockSize,
FileSize: fileSize,
}

keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()

enc := json.NewEncoder(w)
ch := globalNotificationSys.DriveSpeedTest(ctx, opts)

var wg sync.WaitGroup
wg.Add(1)

// local driveSpeedTest
go func() {
defer wg.Done()
enc.Encode(driveSpeedTest(ctx, opts))
if wf, ok := w.(http.Flusher); ok {
wf.Flush()
}
}()

for {
select {
case <-ctx.Done():
goto endloop
case <-keepAliveTicker.C:
// Write a blank entry to prevent client from disconnecting
if err := enc.Encode(madmin.DriveSpeedTestResult{}); err != nil {
goto endloop
}
w.(http.Flusher).Flush()
case result, ok := <-ch:
if !ok {
goto endloop
}
if err := enc.Encode(result); err != nil {
goto endloop
}
w.(http.Flusher).Flush()
}
}
endloop:
wg.Wait()
}

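A small sketch of the parameter handling in DriveSpeedtestHandler above: absent or malformed form values fall back to the defaults (4 MiB blocks, a 1 GiB file). The constants are written out here instead of using the humanize package.

package main

import (
	"fmt"
	"strconv"
)

func parseUintOr(s string, def uint64) uint64 {
	v, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		return def
	}
	return v
}

func main() {
	fmt.Println(parseUintOr("", 4<<20))        // 4194304: default block size
	fmt.Println(parseUintOr("8388608", 1<<30)) // 8388608: explicit value wins
}
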
// Admin API errors
@@ -995,119 +1193,6 @@ const (
AdminUpdateApplyFailure = "XMinioAdminUpdateApplyFailure"
)

// toAdminAPIErrCode - converts errErasureWriteQuorum error to admin API
// specific error.
func toAdminAPIErrCode(ctx context.Context, err error) APIErrorCode {
switch err {
case errErasureWriteQuorum:
return ErrAdminConfigNoQuorum
default:
return toAPIErrorCode(ctx, err)
}
}

func toAdminAPIErr(ctx context.Context, err error) APIError {
if err == nil {
return noError
}

var apiErr APIError
switch e := err.(type) {
case iampolicy.Error:
apiErr = APIError{
Code: "XMinioMalformedIAMPolicy",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case config.Error:
apiErr = APIError{
Code: "XMinioConfigError",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case AdminError:
apiErr = APIError{
Code: e.Code,
Description: e.Message,
HTTPStatusCode: e.StatusCode,
}
default:
switch {
case errors.Is(err, errConfigNotFound):
apiErr = APIError{
Code: "XMinioConfigError",
Description: err.Error(),
HTTPStatusCode: http.StatusNotFound,
}
case errors.Is(err, errIAMActionNotAllowed):
apiErr = APIError{
Code: "XMinioIAMActionNotAllowed",
Description: err.Error(),
HTTPStatusCode: http.StatusForbidden,
}
case errors.Is(err, errIAMNotInitialized):
apiErr = APIError{
Code: "XMinioIAMNotInitialized",
Description: err.Error(),
HTTPStatusCode: http.StatusServiceUnavailable,
}
case errors.Is(err, kes.ErrKeyExists):
apiErr = APIError{
Code: "XMinioKMSKeyExists",
Description: err.Error(),
HTTPStatusCode: http.StatusConflict,
}

// Tier admin API errors
case errors.Is(err, madmin.ErrTierNameEmpty):
apiErr = APIError{
Code: "XMinioAdminTierNameEmpty",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, madmin.ErrTierInvalidConfig):
apiErr = APIError{
Code: "XMinioAdminTierInvalidConfig",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, madmin.ErrTierInvalidConfigVersion):
apiErr = APIError{
Code: "XMinioAdminTierInvalidConfigVersion",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, madmin.ErrTierTypeUnsupported):
apiErr = APIError{
Code: "XMinioAdminTierTypeUnsupported",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errTierBackendInUse):
apiErr = APIError{
Code: "XMinioAdminTierBackendInUse",
Description: err.Error(),
HTTPStatusCode: http.StatusConflict,
}
case errors.Is(err, errTierInsufficientCreds):
apiErr = APIError{
Code: "XMinioAdminTierInsufficientCreds",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errIsTierPermError(err):
apiErr = APIError{
Code: "XMinioAdminTierInsufficientPermissions",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
default:
apiErr = errorCodes.ToAPIErrWithErr(toAdminAPIErrCode(ctx, err), err)
}
}
return apiErr
}

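A runnable sketch of the mapping idea in toAdminAPIErr above: a typed admin error carries its own HTTP status, while anything else falls through to a generic code. AdminError mirrors the struct added in this diff; toStatus is a simplified stand-in for the full mapper.

package main

import (
	"errors"
	"fmt"
	"net/http"
)

type AdminError struct {
	Code       string
	Message    string
	StatusCode int
}

func (ae AdminError) Error() string { return ae.Message }

func toStatus(err error) int {
	var ae AdminError
	if errors.As(err, &ae) {
		return ae.StatusCode // the typed error knows its status
	}
	return http.StatusInternalServerError
}

func main() {
	err := AdminError{"XMinioSpeedtestInsufficientCapacity", "not enough space", http.StatusInsufficientStorage}
	fmt.Println(toStatus(err), toStatus(errors.New("boom"))) // 507 500
}
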
// Returns true if the madmin.TraceInfo should be traced,
// false if certain conditions are not met.
// - input entry is not of the type *madmin.TraceInfo*
@@ -1157,7 +1242,7 @@ func mustTrace(entry interface{}, opts madmin.ServiceTraceOpts) (shouldTrace boo
}

func extractTraceOptions(r *http.Request) (opts madmin.ServiceTraceOpts, err error) {
q := r.URL.Query()
q := r.Form

opts.OnlyErrors = q.Get("err") == "true"
opts.S3 = q.Get("s3") == "true"
@@ -1259,15 +1344,15 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
if objectAPI == nil {
return
}
node := r.URL.Query().Get("node")
node := r.Form.Get("node")
// limit buffered console entries if client requested it.
limitStr := r.URL.Query().Get("limit")
limitStr := r.Form.Get("limit")
limitLines, err := strconv.Atoi(limitStr)
if err != nil {
limitLines = 10
}

logKind := r.URL.Query().Get("logType")
logKind := r.Form.Get("logType")
if logKind == "" {
logKind = string(logger.All)
}
@@ -1308,10 +1393,10 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
if err := enc.Encode(log); err != nil {
return
}
}
if len(logCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
if len(logCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
}
}
case <-keepAliveTicker.C:
if len(logCh) > 0 {
@@ -1342,7 +1427,7 @@ func (a adminAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Req
return
}

if err := GlobalKMS.CreateKey(r.URL.Query().Get("key-id")); err != nil {
if err := GlobalKMS.CreateKey(r.Form.Get("key-id")); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -1409,11 +1494,11 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
return
}

keyID := r.URL.Query().Get("key-id")
keyID := r.Form.Get("key-id")
if keyID == "" {
keyID = stat.DefaultKey
}
var response = madmin.KMSKeyStatus{
response := madmin.KMSKeyStatus{
KeyID: keyID,
}

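A sketch of why the handlers above migrate from r.URL.Query() to r.Form: after ParseForm, r.Form merges URL query parameters with urlencoded body values, so one lookup serves GET and POST callers alike. This is standard net/http behavior, shown here with a synthetic request.

package main

import (
	"fmt"
	"net/http/httptest"
	"strings"
)

func main() {
	body := strings.NewReader("limit=25")
	r := httptest.NewRequest("POST", "http://server/log?node=node1", body)
	r.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	_ = r.ParseForm() // handlers typically call this once before lookups
	fmt.Println(r.Form.Get("node"), r.Form.Get("limit")) // node1 25
}
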
@@ -1470,6 +1555,7 @@ func getServerInfo(ctx context.Context, r *http.Request) madmin.InfoMessage {
ldap := madmin.LDAP{}
if globalLDAPConfig.Enabled {
ldapConn, err := globalLDAPConfig.Connect()
//nolint:gocritic
if err != nil {
ldap.Status = string(madmin.ItemOffline)
} else if ldapConn == nil {
@@ -1551,7 +1637,7 @@ func getServerInfo(ctx context.Context, r *http.Request) madmin.InfoMessage {
return madmin.InfoMessage{
Mode: string(mode),
Domain: domain,
Region: globalServerRegion,
Region: globalSite.Region,
SQSARN: globalNotificationSys.GetARNList(false),
DeploymentID: globalDeploymentID,
Buckets: buckets,
@@ -1576,7 +1662,7 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
return
}

query := r.URL.Query()
query := r.Form
healthInfo := madmin.HealthInfo{Version: madmin.HealthInfoVersion}
healthInfoCh := make(chan madmin.HealthInfo)

@@ -1599,8 +1685,11 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
logger.LogIf(ctx, enc.Encode(healthInfo))
}

deadline := 1 * time.Hour
if dstr := r.URL.Query().Get("deadline"); dstr != "" {
deadline := 10 * time.Second // Default deadline is 10secs for health diagnostics.
if query.Get("perfnet") != "" || query.Get("perfdrive") != "" {
deadline = 1 * time.Hour
}
if dstr := r.Form.Get("deadline"); dstr != "" {
var err error
deadline, err = time.ParseDuration(dstr)
if err != nil {
@@ -1715,11 +1804,43 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
}
}

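A compact sketch of the deadline policy introduced above: cheap diagnostics default to a short deadline, perf captures get a long one, and an explicit deadline parameter still wins. The concrete values mirror the diff.

package main

import (
	"fmt"
	"net/url"
	"time"
)

func healthDeadline(q url.Values) time.Duration {
	deadline := 10 * time.Second // default for light diagnostics
	if q.Get("perfnet") != "" || q.Get("perfdrive") != "" {
		deadline = time.Hour // perf runs need far longer
	}
	if d, err := time.ParseDuration(q.Get("deadline")); err == nil {
		deadline = d // explicit override
	}
	return deadline
}

func main() {
	q, _ := url.ParseQuery("perfdrive=on")
	fmt.Println(healthDeadline(q)) // 1h0m0s
}
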
getAndWriteSysConfig := func() {
if query.Get(string(madmin.HealthDataTypeSysConfig)) == "true" {
localSysConfig := madmin.GetSysConfig(deadlinedCtx, globalLocalNodeName)
anonymizeAddr(&localSysConfig)
healthInfo.Sys.SysConfig = append(healthInfo.Sys.SysConfig, localSysConfig)
partialWrite(healthInfo)

peerSysConfig := globalNotificationSys.GetSysConfig(deadlinedCtx)
for _, sc := range peerSysConfig {
anonymizeAddr(&sc)
healthInfo.Sys.SysConfig = append(healthInfo.Sys.SysConfig, sc)
}
partialWrite(healthInfo)
}
}

getAndWriteSysServices := func() {
if query.Get(string(madmin.HealthDataTypeSysServices)) == "true" {
localSysServices := madmin.GetSysServices(deadlinedCtx, globalLocalNodeName)
anonymizeAddr(&localSysServices)
healthInfo.Sys.SysServices = append(healthInfo.Sys.SysServices, localSysServices)
partialWrite(healthInfo)

peerSysServices := globalNotificationSys.GetSysServices(deadlinedCtx)
for _, ss := range peerSysServices {
anonymizeAddr(&ss)
healthInfo.Sys.SysServices = append(healthInfo.Sys.SysServices, ss)
}
partialWrite(healthInfo)
}
}

anonymizeCmdLine := func(cmdLine string) string {
if !globalIsDistErasure {
// FS mode - single server - hard code to `server1`
anonCmdLine := strings.Replace(cmdLine, globalLocalNodeName, "server1", -1)
return strings.Replace(anonCmdLine, globalMinioConsoleHost, "server1", -1)
anonCmdLine := strings.ReplaceAll(cmdLine, globalLocalNodeName, "server1")
return strings.ReplaceAll(anonCmdLine, globalMinioConsoleHost, "server1")
}

// Server start command regex groups:
@@ -1864,7 +1985,6 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
anonNetwork[anonEndpoint] = status
}
return anonNetwork

}

anonymizeDrives := func(drives []madmin.Disk) []madmin.Disk {
@@ -1889,6 +2009,8 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
getAndWriteDrivePerfInfo()
getAndWriteNetPerfInfo()
getAndWriteSysErrors()
getAndWriteSysServices()
getAndWriteSysConfig()

if query.Get("minioinfo") == "true" {
infoMessage := getServerInfo(ctx, r)
@@ -1913,6 +2035,10 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
},
})
}

tls := getTLSInfo()
isK8s := IsKubernetes()
isDocker := IsDocker()
healthInfo.Minio.Info = madmin.MinioInfo{
Mode: infoMessage.Mode,
Domain: infoMessage.Domain,
@@ -1925,6 +2051,9 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
Services: infoMessage.Services,
Backend: infoMessage.Backend,
Servers: servers,
TLS: &tls,
IsKubernetes: &isK8s,
IsDocker: &isDocker,
}
partialWrite(healthInfo)
}
@@ -1939,19 +2068,41 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
if !ok {
return
}
logger.LogIf(ctx, enc.Encode(oinfo))
w.(http.Flusher).Flush()
if err := enc.Encode(oinfo); err != nil {
return
}
if len(healthInfoCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
}
case <-ticker.C:
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
case <-deadlinedCtx.Done():
w.(http.Flusher).Flush()
return
}
}
}

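A generic sketch of the flush heuristic the streaming loops above converge on: when more items are already queued in the channel, postpone the Flush so several JSON documents coalesce into one write; flush only once the queue drains.

package sketch

import (
	"encoding/json"
	"net/http"
)

func drain[T any](w http.ResponseWriter, ch <-chan T) {
	enc := json.NewEncoder(w)
	for item := range ch {
		if err := enc.Encode(item); err != nil {
			return // client disconnected
		}
		if len(ch) == 0 { // nothing queued, push what we have
			if f, ok := w.(http.Flusher); ok {
				f.Flush()
			}
		}
	}
}
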
func getTLSInfo() madmin.TLSInfo {
tlsInfo := madmin.TLSInfo{
TLSEnabled: globalIsTLS,
Certs: []madmin.TLSCert{},
}

if globalIsTLS {
for _, c := range globalPublicCerts {
tlsInfo.Certs = append(tlsInfo.Certs, madmin.TLSCert{
PubKeyAlgo: c.PublicKeyAlgorithm.String(),
SignatureAlgo: c.SignatureAlgorithm.String(),
NotBefore: c.NotBefore,
NotAfter: c.NotAfter,
})
}
}
return tlsInfo
}

// BandwidthMonitorHandler - GET /minio/admin/v3/bandwidth
@@ -1975,7 +2126,7 @@ func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http
reportCh := make(chan madmin.BucketBandwidthReport)
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()
bucketsRequestedString := r.URL.Query().Get("buckets")
bucketsRequestedString := r.Form.Get("buckets")
bucketsRequested := strings.Split(bucketsRequestedString, ",")
go func() {
defer close(reportCh)
@@ -1988,17 +2139,21 @@ func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http
}
}
}()

enc := json.NewEncoder(w)
for {
select {
case report, ok := <-reportCh:
if !ok {
return
}
if err := json.NewEncoder(w).Encode(report); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
if err := enc.Encode(report); err != nil {
return
}
w.(http.Flusher).Flush()
if len(reportCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
}
case <-keepAliveTicker.C:
if _, err := w.Write([]byte(" ")); err != nil {
return
@@ -2054,7 +2209,6 @@ func assignPoolNumbers(servers []madmin.ServerProperties) {
}

func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {

lambdaMap := make(map[string][]madmin.TargetIDStatus)

for _, tgt := range globalConfigTargetList.Targets() {
@@ -2141,7 +2295,7 @@ func fetchKMSStatus() madmin.KMS {
func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
var loggerInfo []madmin.Logger
var auditloggerInfo []madmin.Audit
for _, target := range logger.Targets {
for _, target := range logger.SystemTargets() {
if target.Endpoint() != "" {
tgt := target.String()
err := checkConnection(target.Endpoint(), 15*time.Second)
@@ -2157,7 +2311,7 @@ func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
}
}

for _, target := range logger.AuditTargets {
for _, target := range logger.AuditTargets() {
if target.Endpoint() != "" {
tgt := target.String()
err := checkConnection(target.Endpoint(), 15*time.Second)
@@ -2181,19 +2335,9 @@ func checkConnection(endpointStr string, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(GlobalContext, timeout)
defer cancel()

client := &http.Client{Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: xhttp.NewCustomDialContext(timeout),
ResponseHeaderTimeout: 5 * time.Second,
TLSHandshakeTimeout: 5 * time.Second,
ExpectContinueTimeout: 5 * time.Second,
TLSClientConfig: &tls.Config{RootCAs: globalRootCAs},
// Go net/http automatically unzip if content-type is
// gzip disable this feature, as we are always interested
// in raw stream.
DisableCompression: true,
}}
defer client.CloseIdleConnections()
client := &http.Client{
Transport: globalProxyTransport,
}

req, err := http.NewRequestWithContext(ctx, http.MethodHead, endpointStr, nil)
if err != nil {
@@ -2210,7 +2354,7 @@ func checkConnection(endpointStr string, timeout time.Duration) error {

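A self-contained sketch of the simplified probe in checkConnection above: one HEAD request bound to a timeout context. The shared globalProxyTransport from the diff is replaced by the default client here, which is an assumption made for illustration only.

package sketch

import (
	"context"
	"net/http"
	"time"
)

func checkEndpoint(ctx context.Context, endpoint string, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	req, err := http.NewRequestWithContext(ctx, http.MethodHead, endpoint, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}
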
// getRawDataer provides an interface for getting raw FS files.
type getRawDataer interface {
GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, size int64, modtime time.Time) error) error
GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, info StatInfo) error) error
}

// InspectDataHandler - GET /minio/admin/v3/inspect-data
@@ -2233,8 +2377,8 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
return
}

volume := r.URL.Query().Get("volume")
file := r.URL.Query().Get("file")
volume := r.Form.Get("volume")
file := r.Form.Get("file")
if len(volume) == 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketName), r.URL)
return
@@ -2243,6 +2387,13 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
file = strings.ReplaceAll(file, string(os.PathSeparator), "/")

// Reject attempts to traverse parent or absolute paths.
if strings.Contains(file, "..") || strings.Contains(volume, "..") {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}

var key [32]byte
// MUST use crypto/rand
@@ -2279,16 +2430,23 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
// of profiling data of all nodes
zipWriter := zip.NewWriter(encw)
defer zipWriter.Close()

err = o.GetRawData(ctx, volume, file, func(r io.Reader, host, disk, filename string, size int64, modtime time.Time) error {
rawDataFn := func(r io.Reader, host, disk, filename string, si StatInfo) error {
// Prefix host+disk
filename = path.Join(host, disk, filename)
if si.Dir {
filename += "/"
si.Size = 0
}
if si.Mode == 0 {
// Mode not set, fall back to a default.
si.Mode = 0o600
}
header, zerr := zip.FileInfoHeader(dummyFileInfo{
name: filename,
size: size,
mode: 0600,
modTime: modtime,
isDir: false,
size: si.Size,
mode: os.FileMode(si.Mode),
modTime: si.ModTime,
isDir: si.Dir,
sys: nil,
})
if zerr != nil {
@@ -2305,8 +2463,33 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
logger.LogIf(ctx, err)
}
return nil
})
logger.LogIf(ctx, err)
}
err = o.GetRawData(ctx, volume, file, rawDataFn)
if !errors.Is(err, errFileNotFound) {
logger.LogIf(ctx, err)
}

// save the format.json as part of inspect by default
if volume != minioMetaBucket && file != formatConfigFile {
err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
}
if !errors.Is(err, errFileNotFound) {
logger.LogIf(ctx, err)
}
// save args passed to inspect command
inspectArgs := []string{fmt.Sprintf(" Inspect path: %s%s%s\n", volume, slashSeparator, file)}
cmdLine := []string{"Server command line args: "}
for _, pool := range globalEndpoints {
cmdLine = append(cmdLine, pool.CmdLine)
}
cmdLine = append(cmdLine, "\n")
inspectArgs = append(inspectArgs, cmdLine...)
inspectArgsBytes := []byte(strings.Join(inspectArgs, " "))
if err = rawDataFn(bytes.NewReader(inspectArgsBytes), "", "", "inspect-input.txt", StatInfo{
Size: int64(len(inspectArgsBytes)),
}); err != nil {
logger.LogIf(ctx, err)
}
}

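A distilled sketch of the traversal guard added to InspectDataHandler above: normalize OS-specific separators first, then reject any request that mentions a parent directory; the handler answers such requests with access denied.

package sketch

import (
	"os"
	"strings"
)

func safeInspectPath(volume, file string) bool {
	file = strings.ReplaceAll(file, string(os.PathSeparator), "/")
	return !strings.Contains(file, "..") && !strings.Contains(volume, "..")
}
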
func createHostAnonymizerForFSMode() map[string]string {

@@ -28,6 +28,7 @@ import (
"net/url"
"sync"
"testing"
"time"

"github.com/gorilla/mux"
"github.com/minio/madmin-go"
@@ -40,11 +41,13 @@ type adminErasureTestBed struct {
erasureDirs []string
objLayer ObjectLayer
router *mux.Router
done context.CancelFunc
}

// prepareAdminErasureTestBed - helper function that setups a single-node
// Erasure backend for admin-handler tests.
func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, error) {
ctx, cancel := context.WithCancel(ctx)

// reset global variables to start afresh.
resetTestGlobals()
@@ -56,11 +59,13 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro
// Initializing objectLayer for HealFormatHandler.
objLayer, erasureDirs, xlErr := initTestErasureObjLayer(ctx)
if xlErr != nil {
cancel()
return nil, xlErr
}

// Initialize minio server config.
if err := newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
cancel()
return nil, err
}

@@ -69,11 +74,11 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro

globalEndpoints = mustGetPoolEndpoints(erasureDirs...)

newAllSubsystems()
initAllSubsystems()

initAllSubsystems(ctx, objLayer)
initConfigSubsystem(ctx, objLayer)

globalIAMSys.InitStore(objLayer)
globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)

// Setup admin mgmt REST API handlers.
adminRouter := mux.NewRouter()
@@ -83,12 +88,14 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro
erasureDirs: erasureDirs,
objLayer: objLayer,
router: adminRouter,
done: cancel,
}, nil
}

// TearDown - method that resets the test bed for subsequent unit
// tests to start afresh.
func (atb *adminErasureTestBed) TearDown() {
atb.done()
removeRoots(atb.erasureDirs)
resetTestGlobals()
}
@@ -229,8 +236,8 @@ func TestServiceRestartHandler(t *testing.T) {

// buildAdminRequest - helper function to build an admin API request.
func buildAdminRequest(queryVal url.Values, method, path string,
contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error) {

contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error,
) {
req, err := newTestRequest(method,
adminPathPrefix+adminAPIVersionPrefix+path+"?"+queryVal.Encode(),
contentLength, bodySeeker)
@@ -373,5 +380,4 @@ func TestExtractHealInitParams(t *testing.T) {
}
}
}

}

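A minimal sketch of the teardown wiring added to the test bed above: setup derives a cancellable context, every early return calls cancel(), and TearDown invokes the saved CancelFunc first so background goroutines stop before directories are removed. The names here are illustrative.

package sketch

import "context"

type testBed struct {
	done context.CancelFunc
}

func newTestBed(ctx context.Context) (*testBed, error) {
	ctx, cancel := context.WithCancel(ctx)
	_ = ctx // setup work that may spawn goroutines would use ctx here
	return &testBed{done: cancel}, nil
}

func (tb *testBed) TearDown() {
	tb.done() // stop background work first
	// then remove test directories and reset globals
}
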
@@ -20,6 +20,7 @@ package cmd
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"sort"
@@ -277,8 +278,8 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
// background routine to clean up heal results after the
// aforementioned duration.
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLayer) (
respBytes []byte, apiErr APIError, errMsg string) {

respBytes []byte, apiErr APIError, errMsg string,
) {
if h.forceStarted {
_, apiErr = ahs.stopHealSequence(pathJoin(h.bucket, h.object))
if apiErr.Code != "" {
@@ -337,8 +338,8 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay
// representation. The clientToken helps ensure there aren't
// conflicting clients fetching status.
func (ahs *allHealState) PopHealStatusJSON(hpath string,
clientToken string) ([]byte, APIErrorCode) {

clientToken string) ([]byte, APIErrorCode,
) {
// fetch heal state for given path
h, exists := ahs.getHealSequence(hpath)
if !exists {
@@ -396,9 +397,6 @@ type healSequence struct {
// bucket, and object on which heal seq. was initiated
bucket, object string

// A channel of entities (format, buckets, objects) to heal
sourceCh chan healSource

// A channel of entities with heal result
respCh chan healResult

@@ -455,8 +453,8 @@ type healSequence struct {
// NewHealSequence - creates healSettings, assumes bucket and
// objPrefix are already validated.
func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
hs madmin.HealOpts, forceStart bool) *healSequence {

hs madmin.HealOpts, forceStart bool,
) *healSequence {
reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
reqInfo.AppendTags("prefix", objPrefix)
ctx, cancel := context.WithCancel(logger.SetReqInfo(ctx, reqInfo))
@@ -493,7 +491,7 @@ func (h *healSequence) getScannedItemsCount() int64 {
defer h.mutex.RUnlock()

for _, v := range h.scannedItemsMap {
count = count + v
count += v
}
return count
}
@@ -648,11 +646,7 @@ func (h *healSequence) healSequenceStart(objAPI ObjectLayer) {
h.currentStatus.StartTime = UTCNow()
h.mutex.Unlock()

if h.sourceCh == nil {
go h.traverseAndHeal(objAPI)
} else {
go h.healFromSourceCh()
}
go h.traverseAndHeal(objAPI)

select {
case err, ok := <-h.traverseAndHealDoneCh:
@@ -696,45 +690,39 @@ func (h *healSequence) logHeal(healType madmin.HealItemType) {
}

func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
globalHealConfigMu.Lock()
opts := globalHealConfig
globalHealConfigMu.Unlock()

// Send heal request
task := healTask{
bucket: source.bucket,
object: source.object,
versionID: source.versionID,
opts: h.settings,
responseCh: h.respCh,
bucket: source.bucket,
object: source.object,
versionID: source.versionID,
opts: h.settings,
respCh: h.respCh,
}
if source.opts != nil {
task.opts = *source.opts
} else {
task.opts.ScanMode = globalHealConfig.ScanMode()
}
if opts.Bitrot {
task.opts.ScanMode = madmin.HealDeepScan
}

// Wait and proceed if there are active requests
waitForLowHTTPReq(opts.IOCount, opts.Sleep)

h.mutex.Lock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
h.mutex.Unlock()

globalBackgroundHealRoutine.queueHealTask(task)
select {
case globalBackgroundHealRoutine.tasks <- task:
if serverDebugLog {
logger.Info("Task in the queue: %#v", task)
}
case <-h.ctx.Done():
return nil
}

select {
case res := <-h.respCh:
if !h.reportProgress {
// Object might have been deleted, by the time heal
// was attempted, we should ignore this object and
// return the error and not calculate this object
// as part of the metrics.
if isErrObjectNotFound(res.err) || isErrVersionNotFound(res.err) {
// Return the error so that caller can handle it.
return res.err
if errors.Is(res.err, errSkipFile) { // this is only sent usually by nopHeal
return nil
}

h.mutex.Lock()
@@ -759,11 +747,6 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
}
res.result.Type = healType
if res.err != nil {
// Object might have been deleted, by the time heal
// was attempted, we should ignore this object and return success.
if isErrObjectNotFound(res.err) || isErrVersionNotFound(res.err) {
return nil
}
// Only report object error
if healType != madmin.HealItemObject {
return res.err
@@ -776,48 +759,6 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
}
}

func (h *healSequence) healItemsFromSourceCh() error {
for {
select {
case source, ok := <-h.sourceCh:
if !ok {
return nil
}

var itemType madmin.HealItemType
switch source.bucket {
case nopHeal:
continue
case SlashSeparator:
itemType = madmin.HealItemMetadata
default:
if source.object == "" {
itemType = madmin.HealItemBucket
} else {
itemType = madmin.HealItemObject
}
}

if err := h.queueHealTask(source, itemType); err != nil {
switch err.(type) {
case ObjectExistsAsDirectory:
case ObjectNotFound:
case VersionNotFound:
default:
logger.LogIf(h.ctx, fmt.Errorf("Heal attempt failed for %s: %w",
pathJoin(source.bucket, source.object), err))
}
}
case <-h.ctx.Done():
return nil
}
}
}

func (h *healSequence) healFromSourceCh() {
h.healItemsFromSourceCh()
}

func (h *healSequence) healDiskMeta(objAPI ObjectLayer) error {
// Start healing the config prefix.
return h.healMinioSysMeta(objAPI, minioConfigPrefix)()
@@ -862,11 +803,6 @@ func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) f
object: object,
versionID: versionID,
}, madmin.HealItemBucketMetadata)
// Object might have been deleted, by the time heal
// was attempted we ignore this object an move on.
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return nil
}
return err
})
}
@@ -915,9 +851,7 @@ func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
// healBucket - traverses and heals given bucket
func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly bool) error {
if err := h.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket); err != nil {
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return err
}
return err
}

if bucketsOnly {
@@ -931,9 +865,6 @@ func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly
oi, err := objAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
if err == nil {
if err = h.healObject(bucket, h.object, oi.VersionID); err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return nil
}
return err
}
}
@@ -943,11 +874,7 @@ func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly
}

if err := objAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
// Object might have been deleted, by the time heal
// was attempted we ignore this object an move on.
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
return errFnHealFromAPIErr(h.ctx, err)
}
return errFnHealFromAPIErr(h.ctx, err)
}
return nil
}
@@ -962,6 +889,11 @@ func (h *healSequence) healObject(bucket, object, versionID string) error {
bucket: bucket,
object: object,
versionID: versionID,
opts: &h.settings,
}, madmin.HealItemObject)

// Wait and proceed if there are active requests
waitForLowHTTPReq()

return err
}

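A reduced sketch of the queueing change in queueHealTask above: sending the task inside a select means a canceled heal sequence simply gives up rather than blocking forever on a full queue.

package sketch

import "context"

type task struct{ bucket, object string }

func enqueue(ctx context.Context, tasks chan<- task, t task) bool {
	select {
	case tasks <- t:
		return true // queued
	case <-ctx.Done():
		return false // sequence canceled while the queue was full
	}
}
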
@@ -24,6 +24,7 @@ import (
"github.com/klauspost/compress/gzhttp"
"github.com/klauspost/compress/gzip"
"github.com/minio/madmin-go"
"github.com/minio/minio/internal/logger"
)

const (
@@ -37,27 +38,20 @@ type adminAPIHandlers struct{}

// registerAdminRouter - Add handler functions for each service REST API routes.
func registerAdminRouter(router *mux.Router, enableConfigOps bool) {

adminAPI := adminAPIHandlers{}
// Admin router
adminRouter := router.PathPrefix(adminPathPrefix).Subrouter()

/// Service operations

adminVersions := []string{
adminAPIVersionPrefix,
}

gz := func(h http.HandlerFunc) http.HandlerFunc {
return h
gz, err := gzhttp.NewWrapper(gzhttp.MinSize(1000), gzhttp.CompressionLevel(gzip.BestSpeed))
if err != nil {
// Static params, so this is very unlikely.
logger.Fatal(err, "Unable to initialize server")
}

wrapper, err := gzhttp.NewWrapper(gzhttp.MinSize(1000), gzhttp.CompressionLevel(gzip.BestSpeed))
if err == nil {
gz = func(h http.HandlerFunc) http.HandlerFunc {
return wrapper(h).(http.HandlerFunc)
}
}
for _, adminVersion := range adminVersions {
// Restart and stop MinIO service.
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/service").HandlerFunc(gz(httpTraceAll(adminAPI.ServiceHandler))).Queries("action", "{action:.*}")
@@ -74,17 +68,20 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(gz(httpTraceAll(adminAPI.DataUsageInfoHandler)))

if globalIsDistErasure || globalIsErasure {
/// Heal operations
// Heal operations

// Heal processing endpoint.
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))

adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(gz(httpTraceAll(adminAPI.BackgroundHealStatusHandler)))

/// Health operations
// Pool operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/pools/list").HandlerFunc(gz(httpTraceAll(adminAPI.ListPools)))
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/pools/status").HandlerFunc(gz(httpTraceAll(adminAPI.StatusPool))).Queries("pool", "{pool:.*}")

adminRouter.Methods(http.MethodPost).Path(adminVersion+"/pools/decommission").HandlerFunc(gz(httpTraceAll(adminAPI.StartDecommission))).Queries("pool", "{pool:.*}")
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/pools/cancel").HandlerFunc(gz(httpTraceAll(adminAPI.CancelDecommission))).Queries("pool", "{pool:.*}")
}

// Profiling operations
@@ -109,7 +106,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/restore-config-history-kv").HandlerFunc(gz(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler))).Queries("restoreId", "{restoreId:.*}")
}

/// Config import/export bulk operations
// Config import/export bulk operations
if enableConfigOps {
// Get config
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/config").HandlerFunc(gz(httpTraceHdrs(adminAPI.GetConfigHandler)))
@@ -194,6 +191,26 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.AddTierHandler)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.EditTierHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListTierHandler)))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.RemoveTierHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.VerifyTierHandler)))
// Tier stats
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier-stats").HandlerFunc(gz(httpTraceHdrs(adminAPI.TierStatsHandler)))

// Cluster Replication APIs
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/add").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationAdd)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/remove").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationRemove)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/info").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationInfo)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/metainfo").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationMetaInfo)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/status").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationStatus)))

adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/join").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerJoin)))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/peer/bucket-ops").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerBucketOps))).Queries("bucket", "{bucket:.*}").Queries("operation", "{operation:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/iam-item").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerReplicateIAMItem)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/bucket-meta").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerReplicateBucketItem)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/peer/idp-settings").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerGetIDPSettings)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/edit").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationEdit)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/edit").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerEdit)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/remove").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerRemove)))
}

if globalIsDistErasure {
@@ -205,9 +222,12 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
}

adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest").HandlerFunc(httpTraceHdrs(adminAPI.SpeedtestHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/object").HandlerFunc(httpTraceHdrs(adminAPI.ObjectSpeedtestHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/drive").HandlerFunc(httpTraceHdrs(adminAPI.DriveSpeedtestHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/net").HandlerFunc(httpTraceHdrs(adminAPI.NetperfHandler))

// HTTP Trace
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/trace").HandlerFunc(gz(adminAPI.TraceHandler))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/trace").HandlerFunc(gz(http.HandlerFunc(adminAPI.TraceHandler)))

// Console Logs
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/log").HandlerFunc(gz(httpTraceAll(adminAPI.ConsoleLogHandler)))

@@ -50,7 +50,7 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
}
_, present := network[nodeName]
if !present {
if err := isServerResolvable(endpoint, 2*time.Second); err == nil {
if err := isServerResolvable(endpoint, 5*time.Second); err == nil {
network[nodeName] = string(madmin.ItemOnline)
} else {
network[nodeName] = string(madmin.ItemOffline)

@@ -28,14 +28,10 @@ type DeletedObject struct {
DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
ObjectName string `xml:"Key,omitempty"`
VersionID string `xml:"VersionId,omitempty"`

// MinIO extensions to support delete marker replication
// Replication status of DeleteMarker
DeleteMarkerReplicationStatus string `xml:"DeleteMarkerReplicationStatus,omitempty"`
// MTime of DeleteMarker on source that needs to be propagated to replica
DeleteMarkerMTime DeleteMarkerMTime `xml:"DeleteMarkerMTime,omitempty"`
// Status of versioned delete (of object or DeleteMarker)
VersionPurgeStatus VersionPurgeStatusType `xml:"VersionPurgeStatus,omitempty"`
DeleteMarkerMTime DeleteMarkerMTime `xml:"-"`
// MinIO extensions to support delete marker replication
ReplicationState ReplicationState `xml:"-"`
}

// DeleteMarkerMTime is an embedded type containing time.Time for XML marshal
@@ -52,16 +48,23 @@ func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, startElement xml.StartElem
return e.EncodeElement(t.Time.Format(time.RFC3339), startElement)
}

// ObjectToDelete carries key name for the object to delete.
type ObjectToDelete struct {
// ObjectV object version key/versionId
type ObjectV struct {
ObjectName string `xml:"Key"`
VersionID string `xml:"VersionId"`
}

// ObjectToDelete carries key name for the object to delete.
type ObjectToDelete struct {
ObjectV
// Replication status of DeleteMarker
DeleteMarkerReplicationStatus string `xml:"DeleteMarkerReplicationStatus"`
// Status of versioned delete (of object or DeleteMarker)
VersionPurgeStatus VersionPurgeStatusType `xml:"VersionPurgeStatus"`
// Version ID of delete marker
DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId"`
// VersionPurgeStatuses holds the internal
VersionPurgeStatuses string `xml:"VersionPurgeStatuses"`
// ReplicateDecisionStr stringified representation of replication decision
ReplicateDecisionStr string `xml:"-"`
}

// createBucketConfiguration container for bucket configuration request from client.

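A runnable sketch of the ObjectV refactor above: the key/versionId pair moves into an embedded struct, so existing code reading d.ObjectName keeps compiling through field promotion, while new code can pass ObjectV around on its own.

package main

import "fmt"

type ObjectV struct {
	ObjectName string `xml:"Key"`
	VersionID  string `xml:"VersionId"`
}

type ObjectToDelete struct {
	ObjectV
	DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId"`
}

func main() {
	d := ObjectToDelete{ObjectV: ObjectV{ObjectName: "a.txt", VersionID: "v1"}}
	fmt.Println(d.ObjectName, d.VersionID) // promoted fields: a.txt v1
}
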
@@ -20,9 +20,11 @@ package cmd
|
||||
import (
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||
@@ -80,6 +82,7 @@ const (
|
||||
ErrIncompleteBody
|
||||
ErrInternalError
|
||||
ErrInvalidAccessKeyID
|
||||
ErrAccessKeyDisabled
|
||||
ErrInvalidBucketName
|
||||
ErrInvalidDigest
|
||||
ErrInvalidRange
|
||||
@@ -107,6 +110,7 @@ const (
|
||||
ErrNoSuchBucketPolicy
|
||||
ErrNoSuchBucketLifecycle
|
||||
ErrNoSuchLifecycleConfiguration
|
||||
ErrInvalidLifecycleWithObjectLock
|
||||
ErrNoSuchBucketSSEConfig
|
||||
ErrNoSuchCORSConfiguration
|
||||
ErrNoSuchWebsiteConfiguration
|
||||
@@ -126,6 +130,7 @@ const (
|
||||
ErrReplicationSourceNotVersionedError
|
||||
ErrReplicationNeedsVersioningError
|
||||
ErrReplicationBucketNeedsVersioningError
|
||||
ErrReplicationDenyEditError
|
||||
ErrReplicationNoMatchingRuleError
|
||||
ErrObjectRestoreAlreadyInProgress
|
||||
ErrNoSuchKey
|
||||
@@ -207,6 +212,7 @@ const (
|
||||
ErrInvalidSSECustomerParameters
|
||||
ErrIncompatibleEncryptionMethod
|
||||
ErrKMSNotConfigured
|
||||
ErrKMSKeyNotFoundException
|
||||
|
||||
ErrNoAccessKey
|
||||
ErrInvalidToken
|
||||
@@ -265,6 +271,16 @@ const (
|
||||
ErrAdminCredentialsMismatch
|
||||
ErrInsecureClientRequest
|
||||
ErrObjectTampered
|
||||
|
||||
// Site-Replication errors
|
||||
ErrSiteReplicationInvalidRequest
|
||||
ErrSiteReplicationPeerResp
|
||||
ErrSiteReplicationBackendIssue
|
||||
ErrSiteReplicationServiceAccountError
|
||||
ErrSiteReplicationBucketConfigError
|
||||
ErrSiteReplicationBucketMetaError
|
||||
ErrSiteReplicationIAMError
|
||||
|
||||
// Bucket Quota error codes
|
||||
ErrAdminBucketQuotaExceeded
|
||||
ErrAdminNoSuchQuotaConfiguration
|
||||
@@ -383,10 +399,10 @@ func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError
|
||||
if err != nil {
|
||||
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
|
||||
}
|
||||
if globalServerRegion != "" {
|
||||
if globalSite.Region != "" {
|
||||
switch errCode {
|
||||
case ErrAuthorizationHeaderMalformed:
|
||||
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalServerRegion)
|
||||
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
|
||||
return apiErr
|
||||
}
|
||||
}
|
||||
@@ -442,7 +458,7 @@ var errorCodes = errorCodeMap{
|
||||
},
|
||||
ErrInvalidMaxParts: {
|
||||
Code: "InvalidArgument",
|
||||
Description: "Argument max-parts must be an integer between 0 and 2147483647",
|
||||
Description: "Part number must be an integer between 1 and 10000, inclusive",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInvalidPartNumberMarker: {
|
||||
@@ -500,6 +516,11 @@ var errorCodes = errorCodeMap{
		Description:    "The Access Key Id you provided does not exist in our records.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrAccessKeyDisabled: {
		Code:           "InvalidAccessKeyId",
		Description:    "Your account is disabled; please contact your administrator.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrInvalidBucketName: {
		Code:           "InvalidBucketName",
		Description:    "The specified bucket is not valid.",
@@ -565,6 +586,11 @@ var errorCodes = errorCodeMap{
		Description:    "The lifecycle configuration does not exist",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrInvalidLifecycleWithObjectLock: {
		Code:           "InvalidLifecycleWithObjectLock",
		Description:    "The lifecycle configuration containing MaxNoncurrentVersions is not supported with object locking",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrNoSuchBucketSSEConfig: {
		Code:           "ServerSideEncryptionConfigurationNotFoundError",
		Description:    "The server side encryption configuration was not found",
@@ -662,7 +688,7 @@ var errorCodes = errorCodeMap{
	},
	ErrAllAccessDisabled: {
		Code:           "AllAccessDisabled",
		Description:    "All access to this bucket has been disabled.",
		Description:    "All access to this resource has been disabled.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrMalformedPolicy: {
@@ -870,6 +896,11 @@ var errorCodes = errorCodeMap{
		Description:    "No matching replication rule found for this object prefix",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrReplicationDenyEditError: {
		Code:           "XMinioReplicationDenyEdit",
		Description:    "Cannot alter local replication config since this server is in a cluster replication setup",
		HTTPStatusCode: http.StatusConflict,
	},
	ErrBucketRemoteIdenticalToSource: {
		Code:           "XMinioAdminRemoteIdenticalToSource",
		Description:    "The remote target cannot be identical to source",
@@ -961,7 +992,7 @@ var errorCodes = errorCodeMap{
		HTTPStatusCode: http.StatusNotFound,
	},

	/// Bucket notification related errors.
	// Bucket notification related errors.
	ErrEventNotification: {
		Code:           "InvalidArgument",
		Description:    "A specified event is not supported for notifications.",
@@ -1097,6 +1128,11 @@ var errorCodes = errorCodeMap{
		Description:    "Server side encryption specified but KMS is not configured",
		HTTPStatusCode: http.StatusNotImplemented,
	},
	ErrKMSKeyNotFoundException: {
		Code:           "KMS.NotFoundException",
		Description:    "Invalid keyId",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrNoAccessKey: {
		Code:           "AccessDenied",
		Description:    "No AWSAccessKey was presented",
@@ -1108,14 +1144,14 @@ var errorCodes = errorCodeMap{
		HTTPStatusCode: http.StatusForbidden,
	},

	/// S3 extensions.
	// S3 extensions.
	ErrContentSHA256Mismatch: {
		Code:           "XAmzContentSHA256Mismatch",
		Description:    "The provided 'x-amz-content-sha256' header does not match what was computed.",
		HTTPStatusCode: http.StatusBadRequest,
	},

	/// MinIO extensions.
	// MinIO extensions.
	ErrStorageFull: {
		Code:           "XMinioStorageFull",
		Description:    "Storage backend has reached its minimum free disk threshold. Please delete a few objects to proceed.",
@@ -1267,6 +1303,43 @@ var errorCodes = errorCodeMap{
		Description:    errObjectTampered.Error(),
		HTTPStatusCode: http.StatusPartialContent,
	},

	ErrSiteReplicationInvalidRequest: {
		Code:           "XMinioSiteReplicationInvalidRequest",
		Description:    "Invalid site-replication request",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrSiteReplicationPeerResp: {
		Code:           "XMinioSiteReplicationPeerResp",
		Description:    "Error received when contacting a peer site",
		HTTPStatusCode: http.StatusServiceUnavailable,
	},
	ErrSiteReplicationBackendIssue: {
		Code:           "XMinioSiteReplicationBackendIssue",
		Description:    "Error when requesting object layer backend",
		HTTPStatusCode: http.StatusServiceUnavailable,
	},
	ErrSiteReplicationServiceAccountError: {
		Code:           "XMinioSiteReplicationServiceAccountError",
		Description:    "Site replication related service account error",
		HTTPStatusCode: http.StatusServiceUnavailable,
	},
	ErrSiteReplicationBucketConfigError: {
		Code:           "XMinioSiteReplicationBucketConfigError",
		Description:    "Error while configuring replication on a bucket",
		HTTPStatusCode: http.StatusServiceUnavailable,
	},
	ErrSiteReplicationBucketMetaError: {
		Code:           "XMinioSiteReplicationBucketMetaError",
		Description:    "Error while replicating bucket metadata",
		HTTPStatusCode: http.StatusServiceUnavailable,
	},
	ErrSiteReplicationIAMError: {
		Code:           "XMinioSiteReplicationIAMError",
		Description:    "Error while replicating an IAM item",
		HTTPStatusCode: http.StatusServiceUnavailable,
	},

	ErrMaximumExpires: {
		Code:           "AuthorizationQueryParametersError",
		Description:    "X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds",
@@ -1313,15 +1386,15 @@ var errorCodes = errorCodeMap{
	},
	ErrBackendDown: {
		Code:           "XMinioBackendDown",
		Description:    "Object storage backend is unreachable",
		HTTPStatusCode: http.StatusServiceUnavailable,
		Description:    "Remote backend is unreachable",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrIncorrectContinuationToken: {
		Code:           "InvalidArgument",
		Description:    "The continuation token provided is incorrect",
		HTTPStatusCode: http.StatusBadRequest,
	},
	//S3 Select API Errors
	// S3 Select API Errors
	ErrEmptyRequestBody: {
		Code:           "EmptyRequestBody",
		Description:    "Request body cannot be empty.",
@@ -1785,12 +1858,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {

	// Only return ErrClientDisconnected if the provided context is actually canceled.
	// This way downstream context.Canceled will still report ErrOperationTimedOut
	select {
	case <-ctx.Done():
	if contextCanceled(ctx) {
		if ctx.Err() == context.Canceled {
			return ErrClientDisconnected
		}
	default:
	}

	switch err {
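
The hunk above swaps an inline select on `ctx.Done()` for a `contextCanceled` helper, keeping the check non-blocking. A minimal sketch of that pattern, assuming only the standard library (the helper body here is an illustration, not the repository's exact source):

```go
package main

import (
	"context"
	"fmt"
)

// contextCanceled reports whether ctx is already canceled,
// returning immediately when it is still live.
func contextCanceled(ctx context.Context) bool {
	select {
	case <-ctx.Done():
		return true
	default:
		return false
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	fmt.Println(contextCanceled(ctx)) // false
	cancel()
	fmt.Println(contextCanceled(ctx)) // true
}
```
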
@@ -1847,6 +1918,9 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrIncompatibleEncryptionMethod
	case errKMSNotConfigured:
		apiErr = ErrKMSNotConfigured
	case errKMSKeyNotFound:
		apiErr = ErrKMSKeyNotFoundException

	case context.Canceled, context.DeadlineExceeded:
		apiErr = ErrOperationTimedOut
	case errDiskNotFound:
@@ -1909,6 +1983,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrNoSuchKey
	case MethodNotAllowed:
		apiErr = ErrMethodNotAllowed
	case ObjectLocked:
		apiErr = ErrObjectLocked
	case InvalidVersionID:
		apiErr = ErrInvalidVersionID
	case VersionNotFound:
@@ -1965,8 +2041,6 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrReplicationConfigurationNotFoundError
	case BucketRemoteDestinationNotFound:
		apiErr = ErrRemoteDestinationNotFoundError
	case BucketReplicationDestinationMissingLock:
		apiErr = ErrReplicationDestinationMissingLock
	case BucketRemoteTargetNotFound:
		apiErr = ErrRemoteTargetNotFoundError
	case BucketRemoteConnectionErr:
@@ -2027,6 +2101,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
	default:
		var ie, iw int
		// This work-around is to handle the issue golang/go#30648
		//nolint:gocritic
		if _, ferr := fmt.Fscanf(strings.NewReader(err.Error()),
			"request declared a Content-Length of %d but only wrote %d bytes",
			&ie, &iw); ferr != nil {
@@ -2059,7 +2134,7 @@ func toAPIError(ctx context.Context, err error) APIError {
		return noError
	}

	var apiErr = errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
	apiErr := errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
	e, ok := err.(dns.ErrInvalidBucketName)
	if ok {
		code := toAPIErrorCode(ctx, e)
@@ -2082,6 +2157,11 @@ func toAPIError(ctx context.Context, err error) APIError {
		}
	}

	if apiErr.Code == "XMinioBackendDown" {
		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
		return apiErr
	}

	if apiErr.Code == "InternalError" {
		// If we see an internal error try to interpret
		// any underlying errors if possible depending on
@@ -2167,7 +2247,6 @@ func toAPIError(ctx context.Context, err error) APIError {
			// since S3 only sends one Error XML response.
			if len(e.Errors) >= 1 {
				apiErr.Code = e.Errors[0].Reason

			}
		case azblob.StorageError:
			apiErr = APIError{
@@ -2177,10 +2256,31 @@ func toAPIError(ctx context.Context, err error) APIError {
			}
			// Add more Gateway SDKs here if any in future.
		default:
			apiErr = APIError{
				Code:           apiErr.Code,
				Description:    fmt.Sprintf("%s: cause(%v)", apiErr.Description, err),
				HTTPStatusCode: apiErr.HTTPStatusCode,
			//nolint:gocritic
			if errors.Is(err, errMalformedEncoding) {
				apiErr = APIError{
					Code:           "BadRequest",
					Description:    err.Error(),
					HTTPStatusCode: http.StatusBadRequest,
				}
			} else if errors.Is(err, errChunkTooBig) {
				apiErr = APIError{
					Code:           "BadRequest",
					Description:    err.Error(),
					HTTPStatusCode: http.StatusBadRequest,
				}
			} else if errors.Is(err, strconv.ErrRange) {
				apiErr = APIError{
					Code:           "BadRequest",
					Description:    err.Error(),
					HTTPStatusCode: http.StatusBadRequest,
				}
			} else {
				apiErr = APIError{
					Code:           apiErr.Code,
					Description:    fmt.Sprintf("%s: cause(%v)", apiErr.Description, err),
					HTTPStatusCode: apiErr.HTTPStatusCode,
				}
			}
		}
	}
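
The rewritten default branch matches sentinel errors with `errors.Is` instead of plain equality, so values wrapped via `fmt.Errorf`'s `%w` verb still map to BadRequest. A small self-contained sketch of why that matters (the `classify` helper is illustrative only):

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
)

// classify mimics the shape of the mapping above: wrapped
// strconv.ErrRange values still classify as a client error.
func classify(err error) string {
	if errors.Is(err, strconv.ErrRange) {
		return "BadRequest"
	}
	return "InternalError"
}

func main() {
	_, err := strconv.ParseUint("99999999999999999999", 10, 64)
	wrapped := fmt.Errorf("parsing content length: %w", err)
	fmt.Println(classify(wrapped)) // BadRequest
}
```
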
@@ -2206,7 +2306,7 @@ func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID,
		BucketName: reqInfo.BucketName,
		Key:        reqInfo.ObjectName,
		Resource:   resource,
		Region:     globalServerRegion,
		Region:     globalSite.Region,
		RequestID:  requestID,
		HostID:     hostID,
	}

@@ -52,7 +52,7 @@ func setCommonHeaders(w http.ResponseWriter) {

	// Set `x-amz-bucket-region` only if region is set on the server
	// by default minio uses an empty region.
	if region := globalServerRegion; region != "" {
	if region := globalSite.Region; region != "" {
		w.Header().Set(xhttp.AmzBucketRegion, region)
	}
	w.Header().Set(xhttp.AcceptRanges, "bytes")
@@ -126,6 +126,11 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp

	// Set all other user defined metadata.
	for k, v := range objInfo.UserDefined {
		// Empty values for object lock and retention can be skipped.
		if v == "" && equals(k, xhttp.AmzObjectLockMode, xhttp.AmzObjectLockRetainUntilDate) {
			continue
		}

		if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
			// Do not need to send any internal metadata
			// values to client.
@@ -187,7 +192,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
	if objInfo.IsRemote() {
		// Check if object is being restored. For more information on x-amz-restore header see
		// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_ResponseSyntax
		w.Header()[xhttp.AmzStorageClass] = []string{objInfo.TransitionTier}
		w.Header()[xhttp.AmzStorageClass] = []string{objInfo.TransitionedObject.Tier}
	}

	if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {

@@ -23,7 +23,7 @@ import (

func TestNewRequestID(t *testing.T) {
	// Ensure that it returns an alphanumeric result of length 16.
	var id = mustGetRequestID(UTCNow())
	id := mustGetRequestID(UTCNow())

	if len(id) != 16 {
		t.Fail()

@@ -29,6 +29,7 @@ import (
	"strings"
	"time"

	"github.com/minio/minio/internal/crypto"
	"github.com/minio/minio/internal/handlers"
	xhttp "github.com/minio/minio/internal/http"
	"github.com/minio/minio/internal/logger"
@@ -36,11 +37,11 @@ import (

const (
	// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
	iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
	maxObjectList     = metacacheBlockSize - (metacacheBlockSize / 10) // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
	maxDeleteList     = 10000 // Limit number of objects deleted in a delete call.
	maxUploadsList    = 10000 // Limit number of uploads in a listUploadsResponse.
	maxPartsList      = 10000 // Limit number of parts in a listPartsResponse.
	iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
	maxObjectList     = 1000  // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
	maxDeleteList     = 1000  // Limit number of objects deleted in a delete call.
	maxUploadsList    = 10000 // Limit number of uploads in a listUploadsResponse.
	maxPartsList      = 10000 // Limit number of parts in a listPartsResponse.
)

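
The constants hunk drops `maxObjectList` from a metacache-derived value back to 1000, the page size S3 clients conventionally expect from list calls, and shrinks `maxDeleteList` to 1000 entries per multi-delete. A hedged sketch of how such a cap is typically enforced against a client-supplied max-keys value (`clampMaxKeys` is an illustrative name, not a function in this diff):

```go
package main

import "fmt"

const maxObjectList = 1000 // S3 list responses return at most 1000 keys per page.

// clampMaxKeys bounds a client-supplied max-keys value to the server cap.
func clampMaxKeys(requested int) int {
	if requested <= 0 || requested > maxObjectList {
		return maxObjectList
	}
	return requested
}

func main() {
	fmt.Println(clampMaxKeys(0), clampMaxKeys(500), clampMaxKeys(100000)) // 1000 500 1000
}
```
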
// LocationResponse - format for location response.
@@ -262,12 +263,11 @@ func (o ObjectVersion) MarshalXML(e *xml.Encoder, start xml.StartElement) error
	return e.EncodeElement(objectVersionWrapper(o), start)
}

// StringMap is a map[string]string.
// StringMap is a map[string]string
type StringMap map[string]string

// MarshalXML - StringMap marshals into XML.
func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {

	tokens := []xml.Token{start}

	for key, value := range s {
@@ -416,17 +416,17 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
// serialized to match XML and JSON API spec output.
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
	listbuckets := make([]Bucket, 0, len(buckets))
	var data = ListBucketsResponse{}
	var owner = Owner{
	data := ListBucketsResponse{}
	owner := Owner{
		ID:          globalMinioDefaultOwnerID,
		DisplayName: "minio",
	}

	for _, bucket := range buckets {
		var listbucket = Bucket{}
		listbucket.Name = bucket.Name
		listbucket.CreationDate = bucket.Created.UTC().Format(iso8601TimeFormat)
		listbuckets = append(listbuckets, listbucket)
		listbuckets = append(listbuckets, Bucket{
			Name:         bucket.Name,
			CreationDate: bucket.Created.UTC().Format(iso8601TimeFormat),
		})
	}

	data.Owner = owner
@@ -438,14 +438,14 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
	versions := make([]ObjectVersion, 0, len(resp.Objects))
	var owner = Owner{
	owner := Owner{
		ID:          globalMinioDefaultOwnerID,
		DisplayName: "minio",
	}
	var data = ListVersionsResponse{}
	data := ListVersionsResponse{}

	for _, object := range resp.Objects {
		var content = ObjectVersion{}
		content := ObjectVersion{}
		if object.Name == "" {
			continue
		}
@@ -485,7 +485,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim

	prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
	for _, prefix := range resp.Prefixes {
		var prefixItem = CommonPrefix{}
		prefixItem := CommonPrefix{}
		prefixItem.Prefix = s3EncodeName(prefix, encodingType)
		prefixes = append(prefixes, prefixItem)
	}
@@ -496,14 +496,14 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
// generates an ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
	contents := make([]Object, 0, len(resp.Objects))
	var owner = Owner{
	owner := Owner{
		ID:          globalMinioDefaultOwnerID,
		DisplayName: "minio",
	}
	var data = ListObjectsResponse{}
	data := ListObjectsResponse{}

	for _, object := range resp.Objects {
		var content = Object{}
		content := Object{}
		if object.Name == "" {
			continue
		}
@@ -534,7 +534,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy

	prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
	for _, prefix := range resp.Prefixes {
		var prefixItem = CommonPrefix{}
		prefixItem := CommonPrefix{}
		prefixItem.Prefix = s3EncodeName(prefix, encodingType)
		prefixes = append(prefixes, prefixItem)
	}
@@ -545,14 +545,14 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
// generates an ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
	contents := make([]Object, 0, len(objects))
	var owner = Owner{
	owner := Owner{
		ID:          globalMinioDefaultOwnerID,
		DisplayName: "minio",
	}
	var data = ListObjectsV2Response{}
	data := ListObjectsV2Response{}

	for _, object := range objects {
		var content = Object{}
		content := Object{}
		if object.Name == "" {
			continue
		}
@@ -570,6 +570,14 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
		content.Owner = owner
		if metadata {
			content.UserMetadata = make(StringMap)
			switch kind, _ := crypto.IsEncrypted(object.UserDefined); kind {
			case crypto.S3:
				content.UserMetadata[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionAES
			case crypto.S3KMS:
				content.UserMetadata[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionKMS
			case crypto.SSEC:
				content.UserMetadata[xhttp.AmzServerSideEncryptionCustomerAlgorithm] = xhttp.AmzEncryptionAES
			}
			for k, v := range CleanMinioInternalMetadataKeys(object.UserDefined) {
				if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
					// Do not need to send any internal metadata
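
The added switch surfaces the detected server-side-encryption kind as the metadata headers S3 clients look for in listings. A rough standalone sketch of that mapping, using an illustrative enum in place of the internal crypto package:

```go
package main

import "fmt"

type sseKind int

const (
	sseNone sseKind = iota
	sseS3   // SSE-S3: server-managed keys
	sseKMS  // SSE-KMS: keys held in an external KMS
	sseC    // SSE-C: client-provided keys
)

// headerFor mirrors the switch above: each encryption kind maps to the
// header/value pair surfaced in the listing's user metadata.
func headerFor(kind sseKind) (key, value string) {
	switch kind {
	case sseS3:
		return "X-Amz-Server-Side-Encryption", "AES256"
	case sseKMS:
		return "X-Amz-Server-Side-Encryption", "aws:kms"
	case sseC:
		return "X-Amz-Server-Side-Encryption-Customer-Algorithm", "AES256"
	}
	return "", ""
}

func main() {
	k, v := headerFor(sseKMS)
	fmt.Println(k, v) // X-Amz-Server-Side-Encryption aws:kms
}
```
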
@@ -599,7 +607,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,

	commonPrefixes := make([]CommonPrefix, 0, len(prefixes))
	for _, prefix := range prefixes {
		var prefixItem = CommonPrefix{}
		prefixItem := CommonPrefix{}
		prefixItem.Prefix = s3EncodeName(prefix, encodingType)
		commonPrefixes = append(commonPrefixes, prefixItem)
	}
@@ -715,9 +723,6 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, err
	if !quiet {
		deleteResp.DeletedObjects = deletedObjects
	}
	if len(errs) == len(deletedObjects) {
		deleteResp.DeletedObjects = nil
	}
	deleteResp.Errors = errs
	return deleteResp
}
@@ -731,7 +736,6 @@ func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType
	w.WriteHeader(statusCode)
	if response != nil {
		w.Write(response)
		w.(http.Flusher).Flush()
	}
}

@@ -782,9 +786,9 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
		// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
		w.Header().Set(xhttp.RetryAfter, "120")
	case "InvalidRegion":
		err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalServerRegion)
		err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region)
	case "AuthorizationHeaderMalformed":
		err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalServerRegion)
		err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
	}

	// Generate error response.
@@ -816,8 +820,8 @@ func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIE
// but accepts the error message directly (this allows messages to be
// dynamically generated.)
func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError,
	errBody string, reqURL *url.URL) {

	errBody string, reqURL *url.URL,
) {
	reqInfo := logger.GetReqInfo(ctx)
	errorResponse := APIErrorResponse{
		Code: err.Code,

@@ -24,7 +24,9 @@ import (

	"github.com/gorilla/mux"
	"github.com/klauspost/compress/gzhttp"
	"github.com/minio/console/restapi"
	xhttp "github.com/minio/minio/internal/http"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/pkg/wildcard"
	"github.com/rs/cors"
)
@@ -41,6 +43,18 @@ func setHTTPServer(h *xhttp.Server) {
	globalObjLayerMutex.Unlock()
}

func newConsoleServerFn() *restapi.Server {
	globalObjLayerMutex.RLock()
	defer globalObjLayerMutex.RUnlock()
	return globalConsoleSrv
}

func setConsoleSrv(srv *restapi.Server) {
	globalObjLayerMutex.Lock()
	globalConsoleSrv = srv
	globalObjLayerMutex.Unlock()
}

func newObjectLayerFn() ObjectLayer {
	globalObjLayerMutex.RLock()
	defer globalObjLayerMutex.RUnlock()
@@ -208,15 +222,10 @@ func registerAPIRouter(router *mux.Router) {
	}
	routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())

	gz := func(h http.HandlerFunc) http.HandlerFunc {
		return h
	}

	wrapper, err := gzhttp.NewWrapper(gzhttp.MinSize(1000), gzhttp.CompressionLevel(gzip.BestSpeed))
	if err == nil {
		gz = func(h http.HandlerFunc) http.HandlerFunc {
			return wrapper(h).(http.HandlerFunc)
		}
	gz, err := gzhttp.NewWrapper(gzhttp.MinSize(1000), gzhttp.CompressionLevel(gzip.BestSpeed))
	if err != nil {
		// Static params, so this is very unlikely.
		logger.Fatal(err, "Unable to initialize server")
	}

	for _, router := range routers {
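
The `gz` rewrite above treats a wrapper-construction failure as fatal instead of silently serving uncompressed responses. For a standalone feel of the library, the sketch below uses gzhttp's simpler `GzipHandler` entry point rather than `NewWrapper` with options (the port and handler are placeholders):

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/klauspost/compress/gzhttp"
)

func main() {
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})
	// GzipHandler compresses responses for clients sending
	// Accept-Encoding: gzip; tiny responses pass through unchanged.
	http.Handle("/", gzhttp.GzipHandler(hello))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```
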
@@ -245,7 +254,7 @@ func registerAPIRouter(router *mux.Router) {
			collectAPIStats("listobjectparts", maxClients(gz(httpTraceAll(api.ListObjectPartsHandler))))).Queries("uploadId", "{uploadId:.*}")
		// CompleteMultipartUpload
		router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
			collectAPIStats("completemutipartupload", maxClients(gz(httpTraceAll(api.CompleteMultipartUploadHandler))))).Queries("uploadId", "{uploadId:.*}")
			collectAPIStats("completemultipartupload", maxClients(gz(httpTraceAll(api.CompleteMultipartUploadHandler))))).Queries("uploadId", "{uploadId:.*}")
		// NewMultipartUpload
		router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
			collectAPIStats("newmultipartupload", maxClients(gz(httpTraceAll(api.NewMultipartUploadHandler))))).Queries("uploads", "")
@@ -278,7 +287,7 @@ func registerAPIRouter(router *mux.Router) {
			collectAPIStats("getobjectlegalhold", maxClients(gz(httpTraceAll(api.GetObjectLegalHoldHandler))))).Queries("legal-hold", "")
		// GetObject - note gzip compression is *not* added due to Range requests.
		router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
			collectAPIStats("getobject", maxClients(httpTraceHdrs(api.GetObjectHandler))))
			collectAPIStats("getobject", maxClients(gz(httpTraceHdrs(api.GetObjectHandler)))))
		// CopyObject
		router.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(
			collectAPIStats("copyobject", maxClients(gz(httpTraceAll(api.CopyObjectHandler)))))
@@ -305,7 +314,8 @@ func registerAPIRouter(router *mux.Router) {
		router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
			collectAPIStats("restoreobject", maxClients(gz(httpTraceAll(api.PostRestoreObjectHandler))))).Queries("restore", "")

		/// Bucket operations
		// Bucket operations

		// GetBucketLocation
		router.Methods(http.MethodGet).HandlerFunc(
			collectAPIStats("getbucketlocation", maxClients(gz(httpTraceAll(api.GetBucketLocationHandler))))).Queries("location", "")
@@ -333,6 +343,9 @@ func registerAPIRouter(router *mux.Router) {
		// ListenNotification
		router.Methods(http.MethodGet).HandlerFunc(
			collectAPIStats("listennotification", maxClients(gz(httpTraceAll(api.ListenNotificationHandler))))).Queries("events", "{events:.*}")
		// ResetBucketReplicationStatus - MinIO extension API
		router.Methods(http.MethodGet).HandlerFunc(
			collectAPIStats("resetbucketreplicationstatus", maxClients(gz(httpTraceAll(api.ResetBucketReplicationStatusHandler))))).Queries("replication-reset-status", "")

		// Dummy Bucket Calls
		// GetBucketACL -- this is a dummy call.
@@ -359,7 +372,7 @@ func registerAPIRouter(router *mux.Router) {
		// GetBucketTaggingHandler
		router.Methods(http.MethodGet).HandlerFunc(
			collectAPIStats("getbuckettagging", maxClients(gz(httpTraceAll(api.GetBucketTaggingHandler))))).Queries("tagging", "")
		//DeleteBucketWebsiteHandler
		// DeleteBucketWebsiteHandler
		router.Methods(http.MethodDelete).HandlerFunc(
			collectAPIStats("deletebucketwebsite", maxClients(gz(httpTraceAll(api.DeleteBucketWebsiteHandler))))).Queries("website", "")
		// DeleteBucketTaggingHandler
@@ -407,9 +420,10 @@ func registerAPIRouter(router *mux.Router) {
		// PutBucketNotification
		router.Methods(http.MethodPut).HandlerFunc(
			collectAPIStats("putbucketnotification", maxClients(gz(httpTraceAll(api.PutBucketNotificationHandler))))).Queries("notification", "")
		// ResetBucketReplicationState - MinIO extension API
		// ResetBucketReplicationStart - MinIO extension API
		router.Methods(http.MethodPut).HandlerFunc(
			collectAPIStats("resetbucketreplicationstate", maxClients(gz(httpTraceAll(api.ResetBucketReplicationStateHandler))))).Queries("replication-reset", "")
			collectAPIStats("resetbucketreplicationstart", maxClients(gz(httpTraceAll(api.ResetBucketReplicationStartHandler))))).Queries("replication-reset", "")

		// PutBucket
		router.Methods(http.MethodPut).HandlerFunc(
			collectAPIStats("putbucket", maxClients(gz(httpTraceAll(api.PutBucketHandler)))))
@@ -456,7 +470,7 @@ func registerAPIRouter(router *mux.Router) {
			collectAPIStats("listobjectsv1", maxClients(gz(httpTraceAll(api.ListObjectsV1Handler)))))
	}

	/// Root operation
	// Root operation

	// ListenNotification
	apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
@@ -474,7 +488,6 @@ func registerAPIRouter(router *mux.Router) {
	// If none of the routes match add default error handler routes
	apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))
	apiRouter.MethodNotAllowedHandler = collectAPIStats("methodnotallowed", httpTraceAll(methodNotAllowedHandler("S3")))

}

// corsHandler handler for CORS (Cross Origin Resource Sharing)

@@ -44,7 +44,6 @@ func TestS3EncodeName(t *testing.T) {
			if testCase.expectedOutput != outputText {
				t.Errorf("Expected `%s`, got `%s`", testCase.expectedOutput, outputText)
			}

		})
	}
}

File diff suppressed because one or more lines are too long

@@ -27,6 +27,7 @@ import (
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"sync/atomic"
@@ -61,13 +62,13 @@ func isRequestSignatureV2(r *http.Request) bool {

// Verify if request has AWS PreSign Version '4'.
func isRequestPresignedSignatureV4(r *http.Request) bool {
	_, ok := r.URL.Query()[xhttp.AmzCredential]
	_, ok := r.Form[xhttp.AmzCredential]
	return ok
}

// Verify request has AWS PreSign Version '2'.
func isRequestPresignedSignatureV2(r *http.Request) bool {
	_, ok := r.URL.Query()[xhttp.AmzAccessKeyID]
	_, ok := r.Form[xhttp.AmzAccessKeyID]
	return ok
}

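
The presign helpers now consult `r.Form`, which the auth-type dispatcher (next hunk) fills in once per request; `r.URL.Query()` re-parses the raw query on every call and silently swallows parse errors. A minimal sketch of the pattern, assuming only the standard library:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// populateForm parses the raw query once and stores it on the request,
// so later lookups can share r.Form instead of re-parsing.
func populateForm(r *http.Request) error {
	form, err := url.ParseQuery(r.URL.RawQuery)
	if err != nil {
		return err
	}
	r.Form = form
	return nil
}

func main() {
	r, _ := http.NewRequest(http.MethodGet, "http://localhost/?X-Amz-Credential=abc", nil)
	if err := populateForm(r); err != nil {
		panic(err)
	}
	_, ok := r.Form["X-Amz-Credential"]
	fmt.Println(ok) // true
}
```
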
@@ -102,6 +103,14 @@ const (

// Get request authentication type.
func getRequestAuthType(r *http.Request) authType {
	if r.URL != nil {
		var err error
		r.Form, err = url.ParseQuery(r.URL.RawQuery)
		if err != nil {
			logger.LogIf(r.Context(), err)
			return authTypeUnknown
		}
	}
	if isRequestSignatureV2(r) {
		return authTypeSignedV2
	} else if isRequestPresignedSignatureV2(r) {
@@ -116,7 +125,7 @@ func getRequestAuthType(r *http.Request) authType {
		return authTypeJWT
	} else if isRequestPostPolicySignatureV4(r) {
		return authTypePostPolicy
	} else if _, ok := r.URL.Query()[xhttp.Action]; ok {
	} else if _, ok := r.Form[xhttp.Action]; ok {
		return authTypeSTS
	} else if _, ok := r.Header[xhttp.Authorization]; !ok {
		return authTypeAnonymous
@@ -129,7 +138,7 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
	var owner bool
	s3Err := ErrAccessDenied
	if _, ok := r.Header[xhttp.AmzContentSha256]; ok &&
		getRequestAuthType(r) == authTypeSigned && !skipContentSha256Cksum(r) {
		getRequestAuthType(r) == authTypeSigned {
		// We only support admin credentials to access admin APIs.
		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
		if s3Err != ErrNone {
@@ -146,12 +155,7 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
		return cred, nil, owner, s3Err
	}

	claims, s3Err := checkClaimsFromToken(r, cred)
	if s3Err != ErrNone {
		return cred, nil, owner, s3Err
	}

	return cred, claims, owner, ErrNone
	return cred, cred.Claims, owner, ErrNone
}

// checkAdminRequestAuth checks for authentication and authorization for the incoming
@@ -183,7 +187,7 @@ func getSessionToken(r *http.Request) (token string) {
	if token != "" {
		return token
	}
	return r.URL.Query().Get(xhttp.AmzSecurityToken)
	return r.Form.Get(xhttp.AmzSecurityToken)
}

// Fetch claims in the security token returned by the client, doesn't return
@@ -193,13 +197,7 @@ func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
	return claims
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(token string) (map[string]interface{}, error) {
	if token == "" {
		claims := xjwt.NewMapClaims()
		return claims.Map(), nil
	}

func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{}, error) {
	// JWT token for x-amz-security-token is signed with admin
	// secret key, temporary credentials become invalid if
	// server admin credentials change. This is done to ensure
@@ -208,9 +206,15 @@ func getClaimsFromToken(token string) (map[string]interface{}, error) {
	// hijacking the policies. We need to make sure that this is
	// based an admin credential such that token cannot be decoded
	// on the client side and is treated like an opaque value.
	claims, err := auth.ExtractClaims(token, globalActiveCred.SecretKey)
	claims, err := auth.ExtractClaims(token, secret)
	if err != nil {
		return nil, errAuthentication
		if subtle.ConstantTimeCompare([]byte(secret), []byte(globalActiveCred.SecretKey)) == 1 {
			return nil, errAuthentication
		}
		claims, err = auth.ExtractClaims(token, globalActiveCred.SecretKey)
		if err != nil {
			return nil, errAuthentication
		}
	}

	// If OPA is set, return without any further checks.
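
The reworked extraction tries the caller's secret first and falls back to the global admin secret only when the two actually differ; `subtle.ConstantTimeCompare` keeps that equality test from leaking timing information. A self-contained sketch of the control flow, with `extractWith` standing in for `auth.ExtractClaims` (an assumed shape, not the real signature):

```go
package main

import (
	"crypto/subtle"
	"errors"
	"fmt"
)

var errAuthentication = errors.New("authentication failed")

// extractWith is a stand-in for a JWT claims extractor keyed by secret.
func extractWith(token, secret string) (map[string]interface{}, error) {
	if token != "signed-with-"+secret { // placeholder verification only
		return nil, errAuthentication
	}
	return map[string]interface{}{"sub": "demo"}, nil
}

// claimsWithFallback mirrors the hunk above: retry with the global secret
// only if it differs from the one that just failed.
func claimsWithFallback(token, secret, globalSecret string) (map[string]interface{}, error) {
	claims, err := extractWith(token, secret)
	if err == nil {
		return claims, nil
	}
	if subtle.ConstantTimeCompare([]byte(secret), []byte(globalSecret)) == 1 {
		return nil, errAuthentication // same secret, retrying is pointless
	}
	return extractWith(token, globalSecret)
}

func main() {
	claims, err := claimsWithFallback("signed-with-global", "svcacct", "global")
	fmt.Println(claims, err) // map[sub:demo] <nil>
}
```
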
@@ -234,39 +238,53 @@ func getClaimsFromToken(token string) (map[string]interface{}, error) {
		claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
	}

	// If LDAP claim key is set, return here.
	if _, ok := claims.MapClaims[ldapUser]; ok {
		return claims.Map(), nil
	}

	// Session token must have a policy, reject requests without policy
	// claim.
	_, pokOpenID := claims.MapClaims[iamPolicyClaimNameOpenID()]
	_, pokSA := claims.MapClaims[iamPolicyClaimNameSA()]
	if !pokOpenID && !pokSA {
		return nil, errAuthentication
	}

	return claims.Map(), nil
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(token string) (map[string]interface{}, error) {
	return getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
}

// Fetch claims in the security token returned by the client and validate the token.
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
	token := getSessionToken(r)
	if token != "" && cred.AccessKey == "" {
		// x-amz-security-token is not allowed for anonymous access.
		return nil, ErrNoAccessKey
	}
	if cred.IsServiceAccount() && token == "" {
		token = cred.SessionToken
	}
	if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {

	if token == "" && cred.IsTemp() {
		// Temporary credentials should always have x-amz-security-token
		return nil, ErrInvalidToken
	}
	claims, err := getClaimsFromToken(token)
	if err != nil {
		return nil, toAPIErrorCode(r.Context(), err)

	if token != "" && !cred.IsTemp() {
		// x-amz-security-token should not present for static credentials.
		return nil, ErrInvalidToken
	}
	return claims, ErrNone

	if cred.IsTemp() && subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
		// validate token for temporary credentials only.
		return nil, ErrInvalidToken
	}

	secret := globalActiveCred.SecretKey
	if cred.IsServiceAccount() {
		token = cred.SessionToken
		secret = cred.SecretKey
	}

	if token != "" {
		claims, err := getClaimsFromTokenWithSecret(token, secret)
		if err != nil {
			return nil, toAPIErrorCode(r.Context(), err)
		}
		return claims, ErrNone
	}

	claims := xjwt.NewMapClaims()
	return claims.Map(), ErrNone
}

// Check request auth type verifies the incoming http request
@@ -295,7 +313,7 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
		}
		cred, owner, s3Err = getReqAccessKeyV2(r)
	case authTypeSigned, authTypePresigned:
		region := globalServerRegion
		region := globalSite.Region
		switch action {
		case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
			region = ""
@@ -309,12 +327,6 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
		return cred, owner, s3Err
	}

	var claims map[string]interface{}
	claims, s3Err = checkClaimsFromToken(r, cred)
	if s3Err != ErrNone {
		return cred, owner, s3Err
	}

	// LocationConstraint is valid only for CreateBucketAction.
	var locationConstraint string
	if action == policy.CreateBucketAction {
@@ -379,10 +391,10 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
			Groups:          cred.Groups,
			Action:          iampolicy.Action(action),
			BucketName:      bucketName,
			ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
			ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
			ObjectName:      objectName,
			IsOwner:         owner,
			Claims:          claims,
			Claims:          cred.Claims,
		}) {
			// Request is allowed return the appropriate access key.
			return cred, owner, ErrNone
@@ -396,10 +408,10 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
		Groups:          cred.Groups,
		Action:          iampolicy.ListBucketAction,
		BucketName:      bucketName,
		ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
		ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
		ObjectName:      objectName,
		IsOwner:         owner,
		Claims:          claims,
		Claims:          cred.Claims,
	}) {
		// Request is allowed return the appropriate access key.
		return cred, owner, ErrNone
@@ -444,7 +456,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
	// Do not verify 'X-Amz-Content-Sha256' if skipSHA256.
	var contentSHA256 []byte
	if skipSHA256 := skipContentSha256Cksum(r); !skipSHA256 && isRequestPresignedSignatureV4(r) {
		if sha256Sum, ok := r.URL.Query()[xhttp.AmzContentSha256]; ok && len(sha256Sum) > 0 {
		if sha256Sum, ok := r.Form[xhttp.AmzContentSha256]; ok && len(sha256Sum) > 0 {
			contentSHA256, err = hex.DecodeString(sha256Sum[0])
			if err != nil {
				return ErrContentSHA256Mismatch
@@ -489,20 +501,27 @@ func setAuthHandler(h http.Handler) http.Handler {
	// handler for validating incoming authorization headers.
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		aType := getRequestAuthType(r)
		if isSupportedS3AuthType(aType) {
			// Let top level caller validate for anonymous and known signed requests.
			h.ServeHTTP(w, r)
			return
		} else if aType == authTypeJWT {
			// Validate Authorization header if its valid for JWT request.
			if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
				w.WriteHeader(http.StatusUnauthorized)
				w.Write([]byte(authErr.Error()))
		if aType == authTypeSigned || aType == authTypeSignedV2 || aType == authTypeStreamingSigned {
			// Verify if date headers are set, if not reject the request
			amzDate, errCode := parseAmzDateHeader(r)
			if errCode != ErrNone {
				// All our internal APIs are sensitive towards Date
				// header, for all requests where Date header is not
				// present we will reject such clients.
				writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(errCode), r.URL)
				atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
				return
			}
			h.ServeHTTP(w, r)
			return
		} else if aType == authTypeSTS {
			// Verify if the request date header is shifted by less than globalMaxSkewTime parameter in the past
			// or in the future, reject request otherwise.
			curTime := UTCNow()
			if curTime.Sub(amzDate) > globalMaxSkewTime || amzDate.Sub(curTime) > globalMaxSkewTime {
				writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrRequestTimeTooSkewed), r.URL)
				atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
				return
			}
		}
		if isSupportedS3AuthType(aType) || aType == authTypeJWT || aType == authTypeSTS {
			h.ServeHTTP(w, r)
			return
		}
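
The middleware now front-loads two request-time checks: signed requests must carry a parseable date header, and requests must fall within the configured clock-skew window. A hedged sketch of the skew check (the 15-minute bound and header handling are illustrative; the server itself uses `parseAmzDateHeader` and `globalMaxSkewTime`):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

const maxSkew = 15 * time.Minute // illustrative bound

// checkRequestDate parses X-Amz-Date (the real helper also accepts the
// plain Date header) and rejects timestamps outside the skew window.
func checkRequestDate(r *http.Request) (time.Time, error) {
	t, err := time.Parse("20060102T150405Z", r.Header.Get("X-Amz-Date"))
	if err != nil {
		return time.Time{}, fmt.Errorf("missing or malformed date header: %w", err)
	}
	if d := time.Since(t); d > maxSkew || d < -maxSkew {
		return time.Time{}, fmt.Errorf("request time too skewed by %v", d)
	}
	return t, nil
}

func main() {
	r, _ := http.NewRequest(http.MethodGet, "http://localhost/", nil)
	r.Header.Set("X-Amz-Date", time.Now().UTC().Format("20060102T150405Z"))
	t, err := checkRequestDate(r)
	fmt.Println(t, err)
}
```
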
@@ -511,75 +530,39 @@
	})
}

func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, map[string]interface{}, APIErrorCode) {
func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, APIErrorCode) {
	var cred auth.Credentials
	var owner bool
	var s3Err APIErrorCode
	switch atype {
	case authTypeUnknown, authTypeStreamingSigned:
		return cred, owner, nil, ErrSignatureVersionNotSupported
		return cred, owner, ErrSignatureVersionNotSupported
	case authTypeSignedV2, authTypePresignedV2:
		if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
			return cred, owner, nil, s3Err
			return cred, owner, s3Err
		}
		cred, owner, s3Err = getReqAccessKeyV2(r)
	case authTypePresigned, authTypeSigned:
		region := globalServerRegion
		region := globalSite.Region
		if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
			return cred, owner, nil, s3Err
			return cred, owner, s3Err
		}
		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
	}
	if s3Err != ErrNone {
		return cred, owner, nil, s3Err
		return cred, owner, s3Err
	}

	claims, s3Err := checkClaimsFromToken(r, cred)
	if s3Err != ErrNone {
		return cred, owner, nil, s3Err
	}

	return cred, owner, claims, ErrNone
	return cred, owner, ErrNone
}

func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate time.Time, retMode objectlock.RetMode, byPassSet bool, r *http.Request, cred auth.Credentials, owner bool, claims map[string]interface{}) (s3Err APIErrorCode) {
func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate time.Time, retMode objectlock.RetMode, byPassSet bool, r *http.Request, cred auth.Credentials, owner bool) (s3Err APIErrorCode) {
	var retSet bool
	if cred.AccessKey == "" {
		conditions := getConditionValues(r, "", "", nil)
		conditions["object-lock-mode"] = []string{string(retMode)}
		conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
		if retDays > 0 {
			conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
		}
		if retMode == objectlock.RetGovernance && byPassSet {
			byPassSet = globalPolicySys.IsAllowed(policy.Args{
				AccountName:     cred.AccessKey,
				Groups:          cred.Groups,
				Action:          policy.BypassGovernanceRetentionAction,
				BucketName:      bucketName,
				ConditionValues: conditions,
				IsOwner:         false,
				ObjectName:      objectName,
			})
		}
		if globalPolicySys.IsAllowed(policy.Args{
			AccountName:     cred.AccessKey,
			Groups:          cred.Groups,
			Action:          policy.PutObjectRetentionAction,
			BucketName:      bucketName,
			ConditionValues: conditions,
			IsOwner:         false,
			ObjectName:      objectName,
		}) {
			retSet = true
		}
		if byPassSet || retSet {
			return ErrNone
		}
		return ErrAccessDenied
	}

	conditions := getConditionValues(r, "", cred.AccessKey, claims)
	conditions := getConditionValues(r, "", cred.AccessKey, cred.Claims)
	conditions["object-lock-mode"] = []string{string(retMode)}
	conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
	if retDays > 0 {
@@ -594,7 +577,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
			ObjectName:      objectName,
			ConditionValues: conditions,
			IsOwner:         owner,
			Claims:          claims,
			Claims:          cred.Claims,
		})
	}
	if globalIAMSys.IsAllowed(iampolicy.Args{
@@ -605,7 +588,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
		ConditionValues: conditions,
		ObjectName:      objectName,
		IsOwner:         owner,
		Claims:          claims,
		Claims:          cred.Claims,
	}) {
		retSet = true
	}
@@ -627,18 +610,13 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
	case authTypeSignedV2, authTypePresignedV2:
		cred, owner, s3Err = getReqAccessKeyV2(r)
	case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
		region := globalServerRegion
		region := globalSite.Region
		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
	}
	if s3Err != ErrNone {
		return s3Err
	}

	claims, s3Err := checkClaimsFromToken(r, cred)
	if s3Err != ErrNone {
		return s3Err
	}

	if cred.AccessKey != "" {
		logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
	}
@@ -672,10 +650,10 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
		Groups:          cred.Groups,
		Action:          action,
		BucketName:      bucketName,
		ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
		ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
		ObjectName:      objectName,
		IsOwner:         owner,
		Claims:          claims,
		Claims:          cred.Claims,
	}) {
		return ErrNone
	}

@@ -38,6 +38,7 @@ func TestGetRequestAuthType(t *testing.T) {
		req   *http.Request
		authT authType
	}
	nopCloser := ioutil.NopCloser(io.LimitReader(&nullReader{}, 1024))
	testCases := []testCase{
		// Test case - 1
		// Check for generic signature v4 header.
@@ -54,6 +55,7 @@ func TestGetRequestAuthType(t *testing.T) {
					"Content-Encoding": []string{streamingContentEncoding},
				},
				Method: http.MethodPut,
				Body:   nopCloser,
			},
			authT: authTypeStreamingSigned,
		},
@@ -113,6 +115,7 @@ func TestGetRequestAuthType(t *testing.T) {
					"Content-Type": []string{"multipart/form-data"},
				},
				Method: http.MethodPost,
				Body:   nopCloser,
			},
			authT: authTypePostPolicy,
		},
@@ -220,6 +223,7 @@ func TestIsRequestPresignedSignatureV2(t *testing.T) {
			q := inputReq.URL.Query()
			q.Add(testCase.inputQueryKey, testCase.inputQueryValue)
			inputReq.URL.RawQuery = q.Encode()
			inputReq.ParseForm()

			actualResult := isRequestPresignedSignatureV2(inputReq)
			if testCase.expectedResult != actualResult {
@@ -254,6 +258,7 @@ func TestIsRequestPresignedSignatureV4(t *testing.T) {
			q := inputReq.URL.Query()
			q.Add(testCase.inputQueryKey, testCase.inputQueryValue)
			inputReq.URL.RawQuery = q.Encode()
			inputReq.ParseForm()

			actualResult := isRequestPresignedSignatureV4(inputReq)
			if testCase.expectedResult != actualResult {
@@ -357,11 +362,14 @@ func TestIsReqAuthenticated(t *testing.T) {
		t.Fatalf("unable initialize config file, %s", err)
	}

	newAllSubsystems()
	initAllSubsystems()

	initAllSubsystems(context.Background(), objLayer)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	globalIAMSys.InitStore(objLayer)
	initConfigSubsystem(ctx, objLayer)

	globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)

	creds, err := auth.CreateCredentials("myuser", "mypassword")
	if err != nil {
@@ -387,10 +395,9 @@ func TestIsReqAuthenticated(t *testing.T) {
		{mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrNone},
	}

	ctx := context.Background()
	// Validates all testcases.
	for i, testCase := range testCases {
		s3Error := isReqAuthenticated(ctx, testCase.req, globalServerRegion, serviceS3)
		s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region, serviceS3)
		if s3Error != testCase.s3Error {
			if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
				t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
@@ -428,15 +435,15 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
	}
	ctx := context.Background()
	for i, testCase := range testCases {
		if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, iampolicy.AllAdminActions, globalServerRegion); s3Error != testCase.ErrCode {
		if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, iampolicy.AllAdminActions, globalSite.Region); s3Error != testCase.ErrCode {
			t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
		}
	}
}

func TestValidateAdminSignature(t *testing.T) {

	ctx := context.Background()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	objLayer, fsDir, err := prepareFS()
	if err != nil {
@@ -448,11 +455,11 @@ func TestValidateAdminSignature(t *testing.T) {
		t.Fatalf("unable initialize config file, %s", err)
	}

	newAllSubsystems()
	initAllSubsystems()

	initAllSubsystems(context.Background(), objLayer)
	initConfigSubsystem(ctx, objLayer)

	globalIAMSys.InitStore(objLayer)
	globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)

	creds, err := auth.CreateCredentials("admin", "mypassword")
	if err != nil {
@@ -19,10 +19,9 @@ package cmd

import (
	"context"
	"time"
	"runtime"

	"github.com/minio/madmin-go"
	"github.com/minio/minio/internal/logger"
)

// healTask represents what to heal along with options
@@ -35,7 +34,7 @@ type healTask struct {
	versionID string
	opts      madmin.HealOpts
	// Healing response will be sent here
	responseCh chan healResult
	respCh chan healResult
}

// healResult represents a healing result with a possible error
@@ -46,54 +45,37 @@ type healResult struct {

// healRoutine receives heal tasks, to heal buckets, objects and format.json
type healRoutine struct {
	tasks  chan healTask
	doneCh chan struct{}
	tasks   chan healTask
	workers int
}

// Add a new task in the tasks queue
func (h *healRoutine) queueHealTask(task healTask) {
	h.tasks <- task
}

func waitForLowHTTPReq(maxIO int, maxWait time.Duration) {
	// No need to wait run at full speed.
	if maxIO <= 0 {
		return
	}

	// At max 10 attempts to wait with 100 millisecond interval before proceeding
	waitTick := 100 * time.Millisecond

func systemIO() int {
	// Bucket notification and http trace are not costly, it is okay to ignore them
	// while counting the number of concurrent connections
	maxIOFn := func() int {
		return maxIO + int(globalHTTPListen.NumSubscribers()) + int(globalTrace.NumSubscribers())
	return int(globalHTTPListen.NumSubscribers()) + int(globalTrace.NumSubscribers())
	}

func waitForLowHTTPReq() {
	var currentIO func() int
	if httpServer := newHTTPServerFn(); httpServer != nil {
		currentIO = httpServer.GetRequestCount
	}

	tmpMaxWait := maxWait
	if httpServer := newHTTPServerFn(); httpServer != nil {
		// Any requests in progress, delay the heal.
		for httpServer.GetRequestCount() >= maxIOFn() {
			if tmpMaxWait > 0 {
				if tmpMaxWait < waitTick {
					time.Sleep(tmpMaxWait)
				} else {
					time.Sleep(waitTick)
				}
				tmpMaxWait = tmpMaxWait - waitTick
			}
			if tmpMaxWait <= 0 {
				if intDataUpdateTracker.debug {
					logger.Info("waitForLowHTTPReq: waited max %s, resuming", maxWait)
				}
				break
			}
		}
	globalHealConfig.Wait(currentIO, systemIO)
}

func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
	// Run the background healer
	globalBackgroundHealRoutine = newHealRoutine()
	for i := 0; i < globalBackgroundHealRoutine.workers; i++ {
		go globalBackgroundHealRoutine.AddWorker(ctx, objAPI)
	}

	globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence(), objAPI)
}

// Wait for heal requests and process them
func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer) {
	for {
		select {
		case task, ok := <-h.tasks:
@@ -105,6 +87,7 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
			var err error
			switch task.bucket {
			case nopHeal:
				task.respCh <- healResult{err: errSkipFile}
				continue
			case SlashSeparator:
				res, err = healDiskFormat(ctx, objAPI, task.opts)
@@ -115,10 +98,8 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
				res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts)
			}
		}
		task.responseCh <- healResult{result: res, err: err}

		case <-h.doneCh:
			return
		task.respCh <- healResult{result: res, err: err}
		case <-ctx.Done():
			return
		}
@@ -126,11 +107,14 @@
}

func newHealRoutine() *healRoutine {
	return &healRoutine{
		tasks:  make(chan healTask),
		doneCh: make(chan struct{}),
	workers := runtime.GOMAXPROCS(0) / 2
	if workers == 0 {
		workers = 4
	}
	return &healRoutine{
		tasks:   make(chan healTask),
		workers: workers,
	}

}
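
`newHealRoutine` now sizes a worker pool from GOMAXPROCS and retires `doneCh` in favor of context cancellation. A compact sketch of the same shape with generic task functions (all names here are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"runtime"
	"sync"
)

// startWorkers launches N workers draining one task channel; each worker
// exits when the channel closes or the context is canceled.
func startWorkers(ctx context.Context, tasks <-chan func()) *sync.WaitGroup {
	workers := runtime.GOMAXPROCS(0) / 2
	if workers == 0 {
		workers = 4
	}
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case task, ok := <-tasks:
					if !ok {
						return
					}
					task()
				case <-ctx.Done():
					return
				}
			}
		}()
	}
	return &wg
}

func main() {
	tasks := make(chan func())
	wg := startWorkers(context.Background(), tasks)
	tasks <- func() { fmt.Println("healed") }
	close(tasks)
	wg.Wait()
}
```
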
// healDiskFormat - heals format.json, return value indicates if a
|
||||
|
||||
@@ -18,12 +18,12 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -263,7 +263,7 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
|
||||
globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)
|
||||
|
||||
if drivesToHeal := globalBackgroundHealState.healDriveCount(); drivesToHeal > 0 {
|
||||
logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content...",
|
||||
logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content - use 'mc admin heal alias/ --verbose' to check the status",
|
||||
drivesToHeal, defaultMonitorNewDiskInterval))
|
||||
|
||||
// Heal any disk format and metadata early, if possible.
|
||||
@@ -277,43 +277,26 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
|
||||
}
|
||||
}
|
||||
|
||||
if err := bgSeq.healDiskMeta(objAPI); err != nil {
|
||||
if newObjectLayerFn() != nil {
|
||||
// log only in situations, when object layer
|
||||
// has fully initialized.
|
||||
logger.LogIf(bgSeq.ctx, err)
|
||||
}
|
||||
}
|
||||
|
||||
go monitorLocalDisksAndHeal(ctx, z, bgSeq)
|
||||
}
|
||||
|
||||
func getLocalDisksToHeal() (disksToHeal Endpoints) {
|
||||
for _, ep := range globalEndpoints {
|
||||
for _, endpoint := range ep.Endpoints {
|
||||
if !endpoint.IsLocal {
|
||||
continue
|
||||
}
|
||||
// Try to connect to the current endpoint
|
||||
// and reformat if the current disk is not formatted
|
||||
disk, _, err := connectEndpoint(endpoint)
|
||||
if errors.Is(err, errUnformattedDisk) {
|
||||
disksToHeal = append(disksToHeal, endpoint)
|
||||
} else if err == nil && disk != nil && disk.Healing() != nil {
|
||||
disksToHeal = append(disksToHeal, disk.Endpoint())
|
||||
}
|
||||
for _, disk := range globalLocalDrives {
|
||||
_, err := disk.GetDiskID()
|
||||
if errors.Is(err, errUnformattedDisk) {
|
||||
disksToHeal = append(disksToHeal, disk.Endpoint())
|
||||
continue
|
||||
}
|
||||
if disk.Healing() != nil {
|
||||
disksToHeal = append(disksToHeal, disk.Endpoint())
|
||||
}
|
||||
}
|
||||
if len(disksToHeal) == globalEndpoints.NEndpoints() {
|
||||
// When all disks == all command line endpoints
|
||||
// this is a fresh setup, no need to trigger healing.
|
||||
return Endpoints{}
|
||||
}
|
||||
return disksToHeal
|
||||
|
||||
}
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
// Run the background healer
globalBackgroundHealRoutine = newHealRoutine()
go globalBackgroundHealRoutine.run(ctx, objAPI)

globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence(), objAPI)
}

// monitorLocalDisksAndHeal - ensures that detected new disks are healed
@@ -337,12 +320,12 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
healDisks := globalBackgroundHealState.getHealLocalDiskEndpoints()
if len(healDisks) > 0 {
// Reformat disks
bgSeq.sourceCh <- healSource{bucket: SlashSeparator}
bgSeq.queueHealTask(healSource{bucket: SlashSeparator}, madmin.HealItemMetadata)

// Ensure that reformatting disks is finished
bgSeq.sourceCh <- healSource{bucket: nopHeal}
bgSeq.queueHealTask(healSource{bucket: nopHeal}, madmin.HealItemMetadata)

logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal - 'mc admin heal alias/ --verbose' to check the status.",
len(healDisks)))

erasureSetInPoolDisksToHeal = make([]map[int][]StorageAPI, len(z.serverPools))
@@ -383,15 +366,13 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq

buckets, _ := z.ListBuckets(ctx)

buckets = append(buckets, BucketInfo{
Name: pathJoin(minioMetaBucket, minioConfigPrefix),
})

// Buckets data are dispersed in multiple zones/sets, make
// sure to heal all bucket metadata configuration.
buckets = append(buckets, []BucketInfo{
{Name: pathJoin(minioMetaBucket, bucketMetaPrefix)},
}...)
buckets = append(buckets, BucketInfo{
Name: pathJoin(minioMetaBucket, minioConfigPrefix),
}, BucketInfo{
Name: pathJoin(minioMetaBucket, bucketMetaPrefix),
})

// Heal latest buckets first.
sort.Slice(buckets, func(i, j int) bool {
@@ -415,12 +396,15 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
go func(setIndex int, disks []StorageAPI) {
defer wg.Done()
for _, disk := range disks {
logger.Info("Healing disk '%v' on %s pool", disk, humanize.Ordinal(i+1))
if serverDebugLog {
logger.Info("Healing disk '%v' on %s pool", disk, humanize.Ordinal(i+1))
}

// So someone changed the drives underneath, healing tracker missing.
tracker, err := loadHealingTracker(ctx, disk)
if err != nil {
logger.Info("Healing tracker missing on '%s', disk was swapped again on %s pool", disk, humanize.Ordinal(i+1))
logger.LogIf(ctx, fmt.Errorf("Healing tracker missing on '%s', disk was swapped again on %s pool: %w",
disk, humanize.Ordinal(i+1), err))
tracker = newHealingTracker(disk)
}

@@ -442,16 +426,19 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
return
}

err = z.serverPools[i].sets[setIndex].healErasureSet(ctx, buckets, tracker)
err = z.serverPools[i].sets[setIndex].healErasureSet(ctx, tracker.QueuedBuckets, tracker)
if err != nil {
logger.LogIf(ctx, err)
continue
}

logger.Info("Healing disk '%s' on %s pool complete", disk, humanize.Ordinal(i+1))
var buf bytes.Buffer
tracker.printTo(&buf)
logger.Info("Summary:\n%s", buf.String())
if serverDebugLog {
logger.Info("Healing disk '%s' on %s pool, %s set complete", disk,
humanize.Ordinal(i+1), humanize.Ordinal(setIndex+1))
logger.Info("Summary:\n")
tracker.printTo(os.Stdout)
logger.Info("\n")
}
logger.LogIf(ctx, tracker.delete(ctx))

// Only upon success pop the healed disk.

@@ -57,6 +57,10 @@ func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
b.closeWithErr(err)
return n, err
}
if n != len(p) {
err = io.ErrShortWrite
b.closeWithErr(err)
}
return n, err
}
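The new hunk in `streamingBitrotWriter.Write` closes the pipe on a short write instead of silently returning a partial count. The same guard as a standalone sketch, with `checkedWriter` as a hypothetical wrapper:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// checkedWriter surfaces any partial write as io.ErrShortWrite and
// tears down the underlying sink via closeFn, mirroring the hunk.
type checkedWriter struct {
	w       io.Writer
	closeFn func(error)
}

func (c *checkedWriter) Write(p []byte) (int, error) {
	n, err := c.w.Write(p)
	if err != nil {
		c.closeFn(err)
		return n, err
	}
	if n != len(p) {
		err = io.ErrShortWrite
		c.closeFn(err)
	}
	return n, err
}

func main() {
	var buf bytes.Buffer
	cw := &checkedWriter{w: &buf, closeFn: func(err error) { fmt.Println("closed with:", err) }}
	n, err := cw.Write([]byte("hello"))
	fmt.Println(n, err) // 5 <nil>
}
```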
@@ -27,8 +27,10 @@ import (
"io"

"github.com/minio/highwayhash"
"github.com/minio/minio/internal/logger"
"golang.org/x/crypto/blake2b"

xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
)

// magic HH-256 key as HH-256 hash of the first 100 decimals of π as utf-8 string with a zero key.
@@ -117,8 +119,10 @@ func newBitrotReader(disk StorageAPI, data []byte, bucket string, filePath strin
// Close all the readers.
func closeBitrotReaders(rs []io.ReaderAt) {
for _, r := range rs {
if br, ok := r.(io.Closer); ok {
br.Close()
if r != nil {
if br, ok := r.(io.Closer); ok {
br.Close()
}
}
}
}
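Both close helpers gain the same defensive nil check: a slot in the slice can legitimately be nil when the corresponding disk was offline, and the guard makes that explicit before the type assertion. The guarded loop in isolation (a sketch, not MinIO's file):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// closeAll closes every non-nil element that implements io.Closer,
// mirroring the guarded loops in the hunks above; nil slots stand
// for offline disks and are skipped.
func closeAll(rs []io.ReaderAt) {
	for _, r := range rs {
		if r != nil {
			if c, ok := r.(io.Closer); ok {
				c.Close()
			}
		}
	}
}

func main() {
	rs := []io.ReaderAt{strings.NewReader("abc"), nil} // one real, one missing
	closeAll(rs)                                       // must not panic on the nil slot
	fmt.Println("closed without panic")
}
```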
@@ -126,8 +130,10 @@ func closeBitrotReaders(rs []io.ReaderAt) {
// Close all the writers.
func closeBitrotWriters(ws []io.Writer) {
for _, w := range ws {
if bw, ok := w.(io.Closer); ok {
bw.Close()
if w != nil {
if bw, ok := w.(io.Closer); ok {
bw.Close()
}
}
}
}
@@ -172,8 +178,8 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
return errFileCorrupt
}

bufp := xlPoolSmall.Get().(*[]byte)
defer xlPoolSmall.Put(bufp)
bufp := xioutil.ODirectPoolSmall.Get().(*[]byte)
defer xioutil.ODirectPoolSmall.Put(bufp)

for left > 0 {
// Read expected hash...
@@ -210,7 +216,7 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
// bitrotSelfTest tries to catch any issue in the bitrot implementation
// early instead of silently corrupting data.
func bitrotSelfTest() {
var checksums = map[BitrotAlgorithm]string{
checksums := map[BitrotAlgorithm]string{
SHA256: "a7677ff19e0182e4d52e3a3db727804abc82a5818749336369552e54b838b004",
BLAKE2b512: "e519b7d84b1c3c917985f544773a35cf265dcab10948be3550320d156bab612124a5ae2ae5a8c73c0eea360f68b0e28136f26e858756dbfe7375a7389f26c669",
HighwayHash256: "39c0407ed3f01b18d22c85db4aeff11e060ca5f43131b0126731ca197cd42313",
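`bitrotSelfTest` is a known-answer test: each algorithm hashes a fixed input at startup and the hex digest must match a pinned constant, so a broken hash implementation fails loudly instead of corrupting data silently. The pattern in miniature, using only the standard library and the well-known SHA-256 vector for the empty string (MinIO's actual inputs and vectors differ):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// selfTest compares a freshly computed digest against a pinned
// known-answer constant; any mismatch means the implementation
// (or a build flag) is broken.
func selfTest() error {
	// SHA-256 of the empty string is a standard published test vector.
	const want = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	sum := sha256.Sum256(nil)
	if got := hex.EncodeToString(sum[:]); got != want {
		return fmt.Errorf("sha256 self-test failed: got %s", got)
	}
	return nil
}

func main() {
	if err := selfTest(); err != nil {
		panic(err) // refuse to start rather than risk silent corruption
	}
	fmt.Println("self-test ok")
}
```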
@@ -24,6 +24,7 @@ import (
"io"
"net/http"
"net/url"
"reflect"
"runtime"
"time"

@@ -32,6 +33,7 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/rest"
"github.com/minio/pkg/env"
)

const (
@@ -52,8 +54,8 @@ type bootstrapRESTServer struct{}
// ServerSystemConfig - captures information about server configuration.
type ServerSystemConfig struct {
MinioPlatform string
MinioRuntime string
MinioEndpoints EndpointServerPools
MinioEnv map[string]string
}

// Diff - returns error on first difference found in two configs.
@@ -82,15 +84,50 @@ func (s1 ServerSystemConfig) Diff(s2 ServerSystemConfig) error {
s2.MinioEndpoints[i].Endpoints[j])
}
}

}
if !reflect.DeepEqual(s1.MinioEnv, s2.MinioEnv) {
var missing []string
var mismatching []string
for k, v := range s1.MinioEnv {
ev, ok := s2.MinioEnv[k]
if !ok {
missing = append(missing, k)
} else if v != ev {
mismatching = append(mismatching, k)
}
}
if len(mismatching) > 0 {
return fmt.Errorf(`Expected same MINIO_ environment variables and values across all servers: Missing environment values: %s / Mismatch environment values: %s`, missing, mismatching)
}
return fmt.Errorf(`Expected same MINIO_ environment variables and values across all servers: Missing environment values: %s`, missing)
}
return nil
}
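The new env comparison replaces a bare `reflect.DeepEqual` failure with an actionable report of which `MINIO_` keys are missing versus mismatched. The same partition logic as a standalone helper (names here are illustrative, not MinIO's):

```go
package main

import "fmt"

// diffEnv reports keys present in want but absent from got, and keys
// present in both whose values differ — the two failure classes the
// bootstrap check distinguishes.
func diffEnv(want, got map[string]string) (missing, mismatching []string) {
	for k, v := range want {
		gv, ok := got[k]
		switch {
		case !ok:
			missing = append(missing, k)
		case gv != v:
			mismatching = append(mismatching, k)
		}
	}
	return missing, mismatching
}

func main() {
	want := map[string]string{"MINIO_REGION": "us-east-1", "MINIO_BROWSER": "on"}
	got := map[string]string{"MINIO_REGION": "eu-west-1"}
	missing, mismatching := diffEnv(want, got)
	fmt.Println(missing, mismatching) // [MINIO_BROWSER] [MINIO_REGION]
}
```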

var skipEnvs = map[string]struct{}{
"MINIO_OPTS": {},
"MINIO_CERT_PASSWD": {},
"MINIO_SERVER_DEBUG": {},
"MINIO_DSYNC_TRACE": {},
}

func getServerSystemCfg() ServerSystemConfig {
envs := env.List("MINIO_")
envValues := make(map[string]string, len(envs))
for _, envK := range envs {
// skip environment variables in the skipEnvs whitelist,
// since they may legitimately be configured differently
// on each node; extend the skipEnvs map if more such
// variables are needed
if _, ok := skipEnvs[envK]; ok {
continue
}
envValues[envK] = env.Get(envK, "")
}
return ServerSystemConfig{
MinioPlatform: fmt.Sprintf("OS: %s | Arch: %s", runtime.GOOS, runtime.GOARCH),
MinioEndpoints: globalEndpoints,
MinioEnv: envValues,
}
}

@@ -101,7 +138,6 @@ func (b *bootstrapRESTServer) VerifyHandler(w http.ResponseWriter, r *http.Reque
ctx := newContext(r, w, "VerifyHandler")
cfg := getServerSystemCfg()
logger.LogIf(ctx, json.NewEncoder(w).Encode(&cfg))
w.(http.Flusher).Flush()
}

// registerBootstrapRESTHandlers - register bootstrap rest router.
@@ -169,11 +205,11 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS
for onlineServers < len(clnts)/2 {
for _, clnt := range clnts {
if err := clnt.Verify(ctx, srcCfg); err != nil {
if isNetworkError(err) {
offlineEndpoints = append(offlineEndpoints, clnt.String())
continue
if !isNetworkError(err) {
logger.LogIf(ctx, fmt.Errorf("%s has incorrect configuration: %w", clnt.String(), err))
}
return fmt.Errorf("%s as has incorrect configuration: %w", clnt.String(), err)
offlineEndpoints = append(offlineEndpoints, clnt.String())
continue
}
onlineServers++
}
@@ -188,7 +224,9 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS
// after 5 retries start logging that servers are not reachable yet
if retries >= 5 {
logger.Info(fmt.Sprintf("Waiting for at least %d remote servers to be online for bootstrap check", len(clnts)/2))
logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
if len(offlineEndpoints) > 0 {
logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
}
retries = 0 // reset to log again after 5 retries.
}
offlineEndpoints = nil
@@ -224,7 +262,7 @@ func newBootstrapRESTClient(endpoint Endpoint) *bootstrapRESTClient {
Path: bootstrapRESTPath,
}

restClient := rest.NewClient(serverURL, globalInternodeTransport, newAuthToken)
restClient := rest.NewClient(serverURL, globalInternodeTransport, newCachedAuthToken())
restClient.HealthCheckFn = nil

return &bootstrapRESTClient{endpoint: endpoint, restClient: restClient}

@@ -18,12 +18,17 @@
package cmd

import (
"encoding/base64"
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"

"github.com/gorilla/mux"
"github.com/minio/kes"
"github.com/minio/madmin-go"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/bucket/policy"
)
@@ -82,6 +87,19 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
kmsKey := encConfig.KeyID()
if kmsKey != "" {
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
_, err := GlobalKMS.GenerateKey(kmsKey, kmsContext)
if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) {
writeErrorResponse(ctx, w, toAPIError(ctx, errKMSKeyNotFound), r.URL)
return
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
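Before persisting an SSE config that names a KMS key, the handler now probes the key with a test `GenerateKey` call so a mistyped key ID fails fast instead of breaking later uploads. A sketch of the same probe against a minimal hypothetical key-manager interface (MinIO's real `GlobalKMS` and the `kes` error types differ):

```go
package main

import (
	"errors"
	"fmt"
)

// errKeyNotFound stands in for the KMS backend's "no such key" error.
var errKeyNotFound = errors.New("key not found")

// keyManager is a hypothetical trimmed-down KMS client: just enough
// surface to attempt a data-key generation.
type keyManager interface {
	GenerateKey(keyID string) error
}

// validateKMSKey probes keyID with a throwaway GenerateKey call and
// distinguishes "key missing" from other backend failures, mirroring
// the handler's control flow.
func validateKMSKey(km keyManager, keyID string) error {
	if keyID == "" {
		return nil // no key named; nothing to probe
	}
	if err := km.GenerateKey(keyID); err != nil {
		if errors.Is(err, errKeyNotFound) {
			return fmt.Errorf("KMS key %q does not exist", keyID)
		}
		return fmt.Errorf("KMS probe failed: %w", err)
	}
	return nil
}

type fakeKMS struct{}

func (fakeKMS) GenerateKey(keyID string) error {
	if keyID != "my-key" {
		return errKeyNotFound
	}
	return nil
}

func main() {
	fmt.Println(validateKMSKey(fakeKMS{}, "my-key"))  // <nil>
	fmt.Println(validateKMSKey(fakeKMS{}, "missing")) // KMS key "missing" does not exist
}
```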

configData, err := xml.Marshal(encConfig)
if err != nil {
@@ -90,7 +108,21 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
}

// Store the bucket encryption configuration in the object layer
if err = globalBucketMetadataSys.Update(bucket, bucketSSEConfig, configData); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

// Call site replication hook.
//
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket,
SSEConfig: &cfgStr,
}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
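The replication hook carries the XML config as a base64 string so the raw bytes survive JSON transport untouched. The round trip in miniature, with `sseRule` as an illustrative stand-in for the real config type:

```go
package main

import (
	"encoding/base64"
	"encoding/xml"
	"fmt"
)

// sseRule is an illustrative stand-in for a bucket SSE configuration.
type sseRule struct {
	XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"`
	KeyID   string   `xml:"KMSMasterKeyID"`
}

func main() {
	// Marshal to XML, then base64-encode so the payload can ride
	// inside a JSON hook message without escaping issues.
	raw, err := xml.Marshal(sseRule{KeyID: "my-key"})
	if err != nil {
		panic(err)
	}
	cfgStr := base64.StdEncoding.EncodeToString(raw)
	fmt.Println(cfgStr)

	// The receiving side decodes back to the original XML bytes.
	back, err := base64.StdEncoding.DecodeString(cfgStr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(back))
}
```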
@@ -170,10 +202,19 @@ func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter
}

// Delete bucket encryption config from object layer
if err = globalBucketMetadataSys.Update(bucket, bucketSSEConfig, nil); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Call site replication hook.
//
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket,
SSEConfig: nil,
}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

writeSuccessNoContent(w)
}

@@ -21,7 +21,7 @@ import (
"errors"
"io"

bucketsse "github.com/minio/minio/internal/bucket/encryption"
sse "github.com/minio/minio/internal/bucket/encryption"
)

// BucketSSEConfigSys - in-memory cache of bucket encryption config
@@ -33,7 +33,7 @@ func NewBucketSSEConfigSys() *BucketSSEConfigSys {
}

// Get - gets bucket encryption config for the given bucket.
func (sys *BucketSSEConfigSys) Get(bucket string) (*bucketsse.BucketSSEConfig, error) {
func (sys *BucketSSEConfigSys) Get(bucket string) (*sse.BucketSSEConfig, error) {
if globalIsGateway {
objAPI := newObjectLayerFn()
if objAPI == nil {
@@ -47,8 +47,8 @@ func (sys *BucketSSEConfigSys) Get(bucket string) (*bucketsse.BucketSSEConfig, e
}

// validateBucketSSEConfig parses bucket encryption configuration and validates if it is supported by MinIO.
func validateBucketSSEConfig(r io.Reader) (*bucketsse.BucketSSEConfig, error) {
encConfig, err := bucketsse.ParseBucketSSEConfig(r)
func validateBucketSSEConfig(r io.Reader) (*sse.BucketSSEConfig, error) {
encConfig, err := sse.ParseBucketSSEConfig(r)
if err != nil {
return nil, err
}

@@ -19,7 +19,7 @@ package cmd

import (
"bytes"
"crypto/subtle"
"context"
"encoding/base64"
"encoding/json"
"encoding/xml"
@@ -38,8 +38,10 @@ import (
"github.com/google/uuid"
"github.com/gorilla/mux"

"github.com/minio/madmin-go"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/tags"
sse "github.com/minio/minio/internal/bucket/encryption"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/config/dns"
@@ -131,8 +133,6 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// Add/update buckets that are not registered with the DNS
bucketsToBeUpdatedSlice := bucketsToBeUpdated.ToSlice()
g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50)
ctx, cancel := g.WithCancelOnError(GlobalContext)
defer cancel()

for index := range bucketsToBeUpdatedSlice {
index := index
@@ -141,9 +141,12 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
}, index)
}

if err := g.WaitErr(); err != nil {
logger.LogIf(ctx, err)
return
ctx := GlobalContext
for _, err := range g.Wait() {
if err != nil {
logger.LogIf(ctx, err)
return
}
}

for _, bucket := range bucketsInConflict.ToSlice() {
@@ -208,7 +211,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
// Generate response.
encodedSuccessResponse := encodeResponse(LocationResponse{})
// Get current region.
region := globalServerRegion
region := globalSite.Region
if region != globalMinioDefaultRegion {
encodedSuccessResponse = encodeResponse(LocationResponse{
Location: region,
@@ -246,7 +249,7 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
return
}

prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode := getBucketMultipartResources(r.URL.Query())
prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode := getBucketMultipartResources(r.Form)
if errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
return
@@ -345,9 +348,6 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
// Set delimiter value for "s3:delimiter" policy conditionals.
r.Header.Set("delimiter", SlashSeparator)

// err will be nil here as we already called this function
// earlier in this request.
claims, _ := getClaimsFromToken(getSessionToken(r))
n := 0
// Use the following trick to filter in place
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
@@ -357,10 +357,10 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
Groups: cred.Groups,
Action: iampolicy.ListBucketAction,
BucketName: bucketInfo.Name,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
Claims: cred.Claims,
}) {
bucketsInfo[n] = bucketInfo
n++
@@ -415,18 +415,23 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
const maxBodySize = 2 * 100000 * 1024

// Unmarshal list of keys to be deleted.
deleteObjects := &DeleteObjectsRequest{}
if err := xmlDecoder(r.Body, deleteObjects, maxBodySize); err != nil {
deleteObjectsReq := &DeleteObjectsRequest{}
if err := xmlDecoder(r.Body, deleteObjectsReq, maxBodySize); err != nil {
logger.LogIf(ctx, err, logger.Application)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

objects := make([]ObjectV, len(deleteObjectsReq.Objects))
// Trim the leading `/` from each object name, if present.
for i := range deleteObjects.Objects {
deleteObjects.Objects[i].ObjectName = trimLeadingSlash(deleteObjects.Objects[i].ObjectName)
for i := range deleteObjectsReq.Objects {
deleteObjectsReq.Objects[i].ObjectName = trimLeadingSlash(deleteObjectsReq.Objects[i].ObjectName)
objects[i] = deleteObjectsReq.Objects[i].ObjectV
}
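S3 clients sometimes send keys with a leading slash, so the handler normalizes every name before building the delete list. A plausible one-line equivalent of that normalization (MinIO's actual `trimLeadingSlash` may differ):

```go
package main

import (
	"fmt"
	"strings"
)

// trimLeadingSlash sketches the normalization applied to each
// ObjectName: "/photos/a.jpg" and "photos/a.jpg" must address
// the same key.
func trimLeadingSlash(name string) string {
	return strings.TrimPrefix(name, "/")
}

func main() {
	for _, k := range []string{"/photos/a.jpg", "photos/a.jpg"} {
		fmt.Println(trimLeadingSlash(k))
	}
}
```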

// Make sure to update context to print ObjectNames for multi objects.
ctx = updateReqContext(ctx, objects...)

// Call checkRequestAuthType to populate ReqInfo.AccessKey before GetBucketInfo()
// Ignore errors here to preserve the S3 error behavior of GetBucketInfo()
checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, "")
@@ -443,37 +448,49 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
deleteObjectsFn = api.CacheAPI().DeleteObjects
}

// Return Malformed XML as S3 spec if the list of objects is empty
if len(deleteObjects.Objects) == 0 {
// Return Malformed XML as S3 spec if the number of objects is empty
if len(deleteObjectsReq.Objects) == 0 || len(deleteObjectsReq.Objects) > maxDeleteList {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL)
return
}

var objectsToDelete = map[ObjectToDelete]int{}
objectsToDelete := map[ObjectToDelete]int{}
getObjectInfoFn := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfoFn = api.CacheAPI().GetObjectInfo
}

var (
hasLockEnabled, replicateSync bool
goi ObjectInfo
gerr error
hasLockEnabled bool
dsc ReplicateDecision
goi ObjectInfo
gerr error
)
replicateDeletes := hasReplicationRules(ctx, bucket, deleteObjects.Objects)
replicateDeletes := hasReplicationRules(ctx, bucket, deleteObjectsReq.Objects)
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
hasLockEnabled = true
}

dErrs := make([]DeleteError, len(deleteObjects.Objects))
oss := make([]*objSweeper, len(deleteObjects.Objects))
for index, object := range deleteObjects.Objects {
versioned := globalBucketVersioningSys.Enabled(bucket)
suspended := globalBucketVersioningSys.Suspended(bucket)

type deleteResult struct {
delInfo DeletedObject
errInfo DeleteError
}

deleteResults := make([]deleteResult, len(deleteObjectsReq.Objects))

oss := make([]*objSweeper, len(deleteObjectsReq.Objects))

for index, object := range deleteObjectsReq.Objects {
if apiErrCode := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); apiErrCode != ErrNone {
if apiErrCode == ErrSignatureDoesNotMatch || apiErrCode == ErrInvalidAccessKeyID {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL)
return
}
apiErr := errorCodes.ToAPIErr(apiErrCode)
dErrs[index] = DeleteError{
deleteResults[index].errInfo = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
@@ -485,7 +502,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
if _, err := uuid.Parse(object.VersionID); err != nil {
logger.LogIf(ctx, fmt.Errorf("invalid version-id specified %w", err))
apiErr := errorCodes.ToAPIErr(ErrNoSuchVersion)
dErrs[index] = DeleteError{
deleteResults[index].errInfo = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
@@ -495,49 +512,51 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}
}

oss[index] = newObjSweeper(bucket, object.ObjectName).WithVersion(multiDelete(object))
// Mutations of objects on versioning suspended buckets
// affect its null version. Through opts below we select
// the null version's remote object to delete if
// transitioned.
opts := oss[index].GetOpts()
goi, gerr = getObjectInfoFn(ctx, bucket, object.ObjectName, opts)
if gerr == nil {
oss[index].SetTransitionState(goi)
opts := ObjectOptions{
VersionID: object.VersionID,
Versioned: versioned,
VersionSuspended: suspended,
}

if replicateDeletes || object.VersionID != "" && hasLockEnabled || !globalTierConfigMgr.Empty() {
if !globalTierConfigMgr.Empty() && object.VersionID == "" && opts.VersionSuspended {
opts.VersionID = nullVersionID
}
goi, gerr = getObjectInfoFn(ctx, bucket, object.ObjectName, opts)
}

if !globalTierConfigMgr.Empty() {
oss[index] = newObjSweeper(bucket, object.ObjectName).WithVersion(opts.VersionID).WithVersioning(versioned, suspended)
oss[index].SetTransitionState(goi.TransitionedObject)
}

if replicateDeletes {
replicate, repsync := checkReplicateDelete(ctx, bucket, ObjectToDelete{
ObjectName: object.ObjectName,
VersionID: object.VersionID,
}, goi, gerr)
replicateSync = repsync
if replicate {
if apiErrCode := checkRequestAuthType(ctx, r, policy.ReplicateDeleteAction, bucket, object.ObjectName); apiErrCode != ErrNone {
if apiErrCode == ErrSignatureDoesNotMatch || apiErrCode == ErrInvalidAccessKeyID {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL)
return
}
continue
}
dsc = checkReplicateDelete(ctx, bucket, ObjectToDelete{
ObjectV: ObjectV{
ObjectName: object.ObjectName,
VersionID: object.VersionID,
},
}, goi, opts, gerr)
if dsc.ReplicateAny() {
if object.VersionID != "" {
object.VersionPurgeStatus = Pending
object.VersionPurgeStatuses = dsc.PendingStatus()
} else {
object.DeleteMarkerReplicationStatus = string(replication.Pending)
object.DeleteMarkerReplicationStatus = dsc.PendingStatus()
}
object.ReplicateDecisionStr = dsc.String()
}
}
if object.VersionID != "" {
if hasLockEnabled {
if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, goi, gerr); apiErrCode != ErrNone {
apiErr := errorCodes.ToAPIErr(apiErrCode)
dErrs[index] = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
VersionID: object.VersionID,
}
continue
if object.VersionID != "" && hasLockEnabled {
if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, goi, gerr); apiErrCode != ErrNone {
apiErr := errorCodes.ToAPIErr(apiErrCode)
deleteResults[index].errInfo = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
VersionID: object.VersionID,
}
continue
}
}

@@ -557,34 +576,40 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
return
}

// Disable timeouts and cancellation
ctx = bgContext(ctx)

deleteList := toNames(objectsToDelete)
dObjects, errs := deleteObjectsFn(ctx, bucket, deleteList, ObjectOptions{
Versioned: globalBucketVersioningSys.Enabled(bucket),
VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
Versioned: versioned,
VersionSuspended: suspended,
})
deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))

for i := range errs {
// DeleteMarkerVersionID is not used specifically to avoid
// lookup errors, since DeleteMarkerVersionID is only
// created during DeleteMarker creation when client didn't
// specify a versionID.
objToDel := ObjectToDelete{
ObjectName: dObjects[i].ObjectName,
VersionID: dObjects[i].VersionID,
VersionPurgeStatus: dObjects[i].VersionPurgeStatus,
DeleteMarkerReplicationStatus: dObjects[i].DeleteMarkerReplicationStatus,
ObjectV: ObjectV{
ObjectName: dObjects[i].ObjectName,
VersionID: dObjects[i].VersionID,
},
VersionPurgeStatus: dObjects[i].VersionPurgeStatus(),
VersionPurgeStatuses: dObjects[i].ReplicationState.VersionPurgeStatusInternal,
DeleteMarkerReplicationStatus: dObjects[i].ReplicationState.ReplicationStatusInternal,
ReplicateDecisionStr: dObjects[i].ReplicationState.ReplicateDecisionStr,
}
dindex := objectsToDelete[objToDel]
if errs[i] == nil || isErrObjectNotFound(errs[i]) || isErrVersionNotFound(errs[i]) {
if replicateDeletes {
dObjects[i].DeleteMarkerReplicationStatus = deleteList[i].DeleteMarkerReplicationStatus
dObjects[i].VersionPurgeStatus = deleteList[i].VersionPurgeStatus
dObjects[i].ReplicationState = deleteList[i].ReplicationState()
}
deletedObjects[dindex] = dObjects[i]
deleteResults[dindex].delInfo = dObjects[i]
continue
}
apiErr := toAPIError(ctx, errs[i])
dErrs[dindex] = DeleteError{
deleteResults[dindex].errInfo = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: deleteList[i].ObjectName,
@@ -592,15 +617,18 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}
}

var deleteErrors []DeleteError
for _, dErr := range dErrs {
if dErr.Code != "" {
deleteErrors = append(deleteErrors, dErr)
// Generate response
deleteErrors := make([]DeleteError, 0, len(deleteObjectsReq.Objects))
deletedObjects := make([]DeletedObject, 0, len(deleteObjectsReq.Objects))
for _, deleteResult := range deleteResults {
if deleteResult.errInfo.Code != "" {
deleteErrors = append(deleteErrors, deleteResult.errInfo)
} else {
deletedObjects = append(deletedObjects, deleteResult.delInfo)
}
}
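The refactor replaces two parallel arrays (`dErrs` plus `deletedObjects`) with one `deleteResult` per request entry, partitioned into errors and successes only when building the response. The partition step in isolation, with illustrative field names:

```go
package main

import "fmt"

// result pairs a per-object outcome the way the handler's
// deleteResult does: an empty error code means success.
type result struct {
	key     string
	errCode string
}

// partition splits results into deleted keys and failed keys,
// preserving request order within each class.
func partition(results []result) (deleted, failed []string) {
	for _, r := range results {
		if r.errCode != "" {
			failed = append(failed, r.key)
			continue
		}
		deleted = append(deleted, r.key)
	}
	return deleted, failed
}

func main() {
	rs := []result{{"a.txt", ""}, {"b.txt", "AccessDenied"}, {"c.txt", ""}}
	deleted, failed := partition(rs)
	fmt.Println(deleted, failed) // [a.txt c.txt] [b.txt]
}
```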

// Generate response
response := generateMultiDeleteResponse(deleteObjects.Quiet, deletedObjects, deleteErrors)
response := generateMultiDeleteResponse(deleteObjectsReq.Quiet, deletedObjects, deleteErrors)
encodedSuccessResponse := encodeResponse(response)

// Write success response.
@@ -611,27 +639,23 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}

if replicateDeletes {
if dobj.DeleteMarkerReplicationStatus == string(replication.Pending) || dobj.VersionPurgeStatus == Pending {
if dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == Pending {
dv := DeletedObjectReplicationInfo{
DeletedObject: dobj,
Bucket: bucket,
}
scheduleReplicationDelete(ctx, dv, objectAPI, replicateSync)
scheduleReplicationDelete(ctx, dv, objectAPI)
}
}

}

// Clean up transitioned objects from remote tier
for _, os := range oss {
if os == nil { // skip objects that weren't deleted due to invalid versionID etc.
continue
}
logger.LogIf(ctx, os.Sweep())
}

// Notify deleted event for objects.
for _, dobj := range deletedObjects {
if dobj.ObjectName == "" {
continue
}

eventName := event.ObjectRemovedDelete
objInfo := ObjectInfo{
Name: dobj.ObjectName,
@@ -654,6 +678,14 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
Host: handlers.GetSourceIP(r),
})
}

// Clean up transitioned objects from remote tier
for _, os := range oss {
if os == nil { // skip objects that weren't deleted due to invalid versionID etc.
continue
}
logger.LogIf(ctx, os.Sweep())
}
}

// PutBucketHandler - PUT Bucket
@@ -720,7 +752,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
}

if err = globalDNSConfig.Put(bucket); err != nil {
objectAPI.DeleteBucket(ctx, bucket, false)
objectAPI.DeleteBucket(context.Background(), bucket, DeleteBucketOptions{Force: false, NoRecreate: true})
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -763,6 +795,17 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req

// Proceed to creating a bucket.
err := objectAPI.MakeBucketWithLocation(ctx, bucket, opts)
if _, ok := err.(BucketExists); ok {
// Though bucket exists locally, we send the site-replication
// hook to ensure all sites have this bucket. If the hook
// succeeds, the client will still receive a bucket exists
// message.
err2 := globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts)
if err2 != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -771,6 +814,13 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
// Load updated bucket metadata into memory.
globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)

// Call site replication hook
err = globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

// Make sure to add Location information here only for bucket
if cp := pathClean(r.URL.Path); cp != "" {
w.Header().Set(xhttp.Location, cp) // Clean any trailing slashes.
@@ -875,7 +925,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
// S3 feature to replace ${filename} found in Key form field
// by the filename attribute passed in multipart
formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1))
formValues.Set("Key", strings.ReplaceAll(formValues.Get("Key"), "${filename}", fileName))
}
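`strings.Replace` with `n = -1` and `strings.ReplaceAll` are equivalent; the new form just states the intent. The S3 POST-policy `${filename}` substitution in isolation:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// The browser form supplies a key template; every ${filename}
	// occurrence is replaced by the uploaded file's name.
	key := "user-uploads/${filename}"
	fileName := "report.pdf"
	fmt.Println(strings.ReplaceAll(key, "${filename}", fileName))
	// Output: user-uploads/report.pdf
}
```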
object := trimLeadingSlash(formValues.Get("Key"))

@@ -899,41 +949,18 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h

// Once signature is validated, check if the user has
// explicit permissions for the user.
{
token := formValues.Get(xhttp.AmzSecurityToken)
if token != "" && cred.AccessKey == "" {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoAccessKey), r.URL)
return
}

if cred.IsServiceAccount() && token == "" {
token = cred.SessionToken
}

if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidToken), r.URL)
return
}

// Extract claims if any.
claims, err := getClaimsFromToken(token)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.PutObjectAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
BucketName: bucket,
ObjectName: object,
IsOwner: globalActiveCred.AccessKey == cred.AccessKey,
Claims: claims,
}) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.PutObjectAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
BucketName: bucket,
ObjectName: object,
IsOwner: globalActiveCred.AccessKey == cred.AccessKey,
Claims: cred.Claims,
}) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}

policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
@@ -994,7 +1021,10 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h

// Check if bucket encryption is enabled
sseConfig, _ := globalBucketSSEConfigSys.Get(bucket)
sseConfig.Apply(r.Header, globalAutoEncryption)
sseConfig.Apply(r.Header, sse.ApplyOptions{
AutoEncrypt: globalAutoEncryption,
Passthrough: globalIsGateway && globalGatewayName == S3BackendGateway,
})

// get gateway encryption options
var opts ObjectOptions
@@ -1193,7 +1223,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
return
}

writeSuccessResponseHeadersOnly(w)
writeResponse(w, http.StatusOK, nil, mimeXML)
}

// DeleteBucketHandler - Delete bucket
@@ -1240,6 +1270,17 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
rcfg, err := getReplicationConfig(ctx, bucket)
switch {
case err != nil:
if _, ok := err.(BucketReplicationConfigNotFound); !ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
case rcfg.HasActiveRules("", true):
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
}
}

@@ -1254,7 +1295,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
deleteBucket := objectAPI.DeleteBucket

// Attempt to delete bucket.
if err := deleteBucket(ctx, bucket, forceDelete); err != nil {
if err := deleteBucket(ctx, bucket, DeleteBucketOptions{Force: forceDelete}); err != nil {
apiErr := toAPIError(ctx, err)
if _, ok := err.(BucketNotEmpty); ok {
if globalBucketVersioningSys.Enabled(bucket) || globalBucketVersioningSys.Suspended(bucket) {
@@ -1263,7 +1304,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
}
if globalDNSConfig != nil {
if err2 := globalDNSConfig.Put(bucket); err2 != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to restore bucket DNS entry %w, pl1ease fix it manually", err2))
logger.LogIf(ctx, fmt.Errorf("Unable to restore bucket DNS entry %w, please fix it manually", err2))
}
}
writeErrorResponse(ctx, w, apiErr, r.URL)
@@ -1271,6 +1312,12 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
}

globalNotificationSys.DeleteBucketMetadata(ctx, bucket)
globalReplicationPool.deleteResyncMetadata(ctx, bucket)
// Call site replication hook.
if err := globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

// Write success response.
writeSuccessNoContent(w)
@@ -1332,7 +1379,21 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
return
}

if err = globalBucketMetadataSys.Update(bucket, objectLockConfig, configData); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

// Call site replication hook.
//
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeObjectLockConfig,
Bucket: bucket,
ObjectLockConfig: &cfgStr,
}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1398,6 +1459,12 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
return
}

// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketTaggingAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
@@ -1417,7 +1484,21 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
return
}

if err = globalBucketMetadataSys.Update(bucket, bucketTaggingConfig, configData); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

// Call site replication hook.
//
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeTags,
Bucket: bucket,
Tags: &cfgStr,
}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1485,7 +1566,15 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r
return
}

if err := globalBucketMetadataSys.Update(bucket, bucketTaggingConfig, nil); err != nil {
if err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if err := globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeTags,
Bucket: bucket,
}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1521,7 +1610,10 @@ func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWr
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if globalSiteReplicationSys.isEnabled() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationDenyEditError), r.URL)
return
}
if versioned := globalBucketVersioningSys.Enabled(bucket); !versioned {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationNeedsVersioningError), r.URL)
return
@@ -1533,9 +1625,9 @@ func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWr
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}
sameTarget, err := validateReplicationDestination(ctx, bucket, replicationConfig)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
sameTarget, apiErr := validateReplicationDestination(ctx, bucket, replicationConfig)
if apiErr != noError {
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}
// Validate the received bucket replication config
@@ -1548,7 +1640,7 @@ func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWr
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketReplicationConfig, configData); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketReplicationConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1623,7 +1715,11 @@ func (api objectAPIHandlers) DeleteBucketReplicationConfigHandler(w http.Respons
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err := globalBucketMetadataSys.Update(bucket, bucketReplicationConfig, nil); err != nil {
if globalSiteReplicationSys.isEnabled() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationDenyEditError), r.URL)
return
}
if err := globalBucketMetadataSys.Update(ctx, bucket, bucketReplicationConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1660,46 +1756,38 @@ func (api objectAPIHandlers) GetBucketReplicationMetricsHandler(w http.ResponseW
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

bucketStats := globalNotificationSys.GetClusterBucketStats(r.Context(), bucket)
bucketReplStats := BucketReplicationStats{}
// sum up metrics from each node in the cluster
for _, bucketStat := range bucketStats {
bucketReplStats.FailedCount += bucketStat.ReplicationStats.FailedCount
bucketReplStats.FailedSize += bucketStat.ReplicationStats.FailedSize
bucketReplStats.PendingCount += bucketStat.ReplicationStats.PendingCount
bucketReplStats.PendingSize += bucketStat.ReplicationStats.PendingSize
bucketReplStats.ReplicaSize += bucketStat.ReplicationStats.ReplicaSize
bucketReplStats.ReplicatedSize += bucketStat.ReplicationStats.ReplicatedSize
var usageInfo BucketUsageInfo
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err == nil && !dataUsageInfo.LastUpdate.IsZero() {
usageInfo = dataUsageInfo.BucketsUsage[bucket]
}
// add initial usage from the time of cluster up
usageStat := globalReplicationStats.GetInitialUsage(bucket)
bucketReplStats.FailedCount += usageStat.FailedCount
bucketReplStats.FailedSize += usageStat.FailedSize
bucketReplStats.PendingCount += usageStat.PendingCount
bucketReplStats.PendingSize += usageStat.PendingSize
bucketReplStats.ReplicaSize += usageStat.ReplicaSize
bucketReplStats.ReplicatedSize += usageStat.ReplicatedSize

if err := json.NewEncoder(w).Encode(&bucketReplStats); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
w.Header().Set(xhttp.ContentType, string(mimeJSON))

enc := json.NewEncoder(w)
if err = enc.Encode(getLatestReplicationStats(bucket, usageInfo)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
}

// ResetBucketReplicationStateHandler - starts a replication reset for all objects in a bucket which
// ResetBucketReplicationStartHandler - starts a replication reset for all objects in a bucket which
// qualify for replication and re-sync the object(s) to target, provided ExistingObjectReplication is
// enabled for the qualifying rule. This API is a MinIO only extension provided for situations where
// remote target is entirely lost, and previously replicated objects need to be re-synced.
func (api objectAPIHandlers) ResetBucketReplicationStateHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ResetBucketReplicationState")

// remote target is entirely lost, and previously replicated objects need to be re-synced. If resync is
// already in progress it returns an error.
func (api objectAPIHandlers) ResetBucketReplicationStartHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ResetBucketReplicationStart")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
durationStr := r.URL.Query().Get("older-than")
arn := r.URL.Query().Get("arn")
resetID := r.URL.Query().Get("reset-id")
if resetID == "" {
resetID = mustGetUUID()
}
var (
days time.Duration
err error
@@ -1713,6 +1801,7 @@ func (api objectAPIHandlers) ResetBucketReplicationStateHandler(w http.ResponseW
}), r.URL)
}
}
resetBeforeDate := UTCNow().AddDate(0, 0, -1*int(days/24))

objectAPI := api.ObjectAPI()
if objectAPI == nil {
@@ -1740,9 +1829,32 @@ func (api objectAPIHandlers) ResetBucketReplicationStateHandler(w http.ResponseW
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationNoMatchingRuleError), r.URL)
return
}
target := globalBucketTargetSys.GetRemoteBucketTargetByArn(ctx, bucket, config.RoleArn)
tgtArns := config.FilterTargetArns(
replication.ObjectOpts{
OpType: replication.ResyncReplicationType,
TargetArn: arn,
})

if len(tgtArns) == 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: fmt.Errorf("Remote target ARN %s missing or ineligible for replication resync", arn),
}), r.URL)
return
}

if len(tgtArns) > 1 && arn == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: fmt.Errorf("ARN should be specified for replication reset"),
}), r.URL)
return
}
var rinfo ResyncTargetsInfo
target := globalBucketTargetSys.GetRemoteBucketTargetByArn(ctx, bucket, tgtArns[0])
target.ResetBeforeDate = UTCNow().AddDate(0, 0, -1*int(days/24))
target.ResetID = mustGetUUID()
target.ResetID = resetID
rinfo.Targets = append(rinfo.Targets, ResyncTarget{Arn: tgtArns[0], ResetID: target.ResetID})
if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, true); err != nil {
switch err.(type) {
case BucketRemoteConnectionErr:
@@ -1750,23 +1862,91 @@ func (api objectAPIHandlers) ResetBucketReplicationStateHandler(w http.ResponseW
default:
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
}
}
if err := startReplicationResync(ctx, bucket, arn, resetID, resetBeforeDate, objectAPI); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: err,
}), r.URL)
return
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
tgtBytes, err := json.Marshal(&targets)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
data, err := json.Marshal(target.ResetID)

data, err := json.Marshal(rinfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, data)
}

// ResetBucketReplicationStatusHandler - returns the status of replication reset.
// This API is a MinIO only extension
func (api objectAPIHandlers) ResetBucketReplicationStatusHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ResetBucketReplicationStatus")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
arn := r.URL.Query().Get("arn")
var err error

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.ResetBucketReplicationStateAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}

// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

globalReplicationPool.resyncState.RLock()
brs, ok := globalReplicationPool.resyncState.statusMap[bucket]
if !ok {
brs, err = loadBucketResyncMetadata(ctx, bucket, objectAPI)
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: fmt.Errorf("No replication resync status available for %s", arn),
}), r.URL)
}
return
}

var rinfo ResyncTargetsInfo
for tarn, st := range brs.TargetsMap {
if arn != "" && tarn != arn {
continue
}
rinfo.Targets = append(rinfo.Targets, ResyncTarget{
Arn: tarn,
ResetID: st.ResyncID,
StartTime: st.StartTime,
EndTime: st.EndTime,
ResyncStatus: st.ResyncStatus.String(),
ReplicatedSize: st.ReplicatedSize,
ReplicatedCount: st.ReplicatedCount,
FailedSize: st.FailedSize,
FailedCount: st.FailedCount,
Bucket: st.Bucket,
Object: st.Object,
})
}
globalReplicationPool.resyncState.RUnlock()
data, err := json.Marshal(rinfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return

@@ -20,6 +20,7 @@ package cmd
import (
"bytes"
"encoding/xml"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
@@ -80,8 +81,8 @@ func TestGetBucketLocationHandler(t *testing.T) {
}

func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {

credentials auth.Credentials, t *testing.T,
) {
// test cases with sample input and expected output.
testCases := []struct {
bucketName string
@@ -162,7 +163,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2(http.MethodGet, getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)

if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
}
@@ -209,7 +209,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri

nilBucket := "dummy-bucket"
nilReq, err := newTestRequest(http.MethodGet, getBucketLocationURL("", nilBucket), 0, nil)

if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
@@ -224,8 +223,8 @@ func TestHeadBucketHandler(t *testing.T) {
}

func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {

credentials auth.Credentials, t *testing.T,
) {
// test cases with sample input and expected output.
testCases := []struct {
bucketName string
@@ -281,7 +280,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2(http.MethodHead, getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)

if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
}
@@ -296,7 +294,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api

// Test for Anonymous/unsigned http request.
anonReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", bucketName), 0, nil)

if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
instanceType, bucketName, err)
@@ -314,7 +311,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api

nilBucket := "dummy-bucket"
nilReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", nilBucket), 0, nil)

if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
@@ -330,8 +326,8 @@ func TestListMultipartUploadsHandler(t *testing.T) {

// testListMultipartUploadsHandler - Tests validate listing of multipart uploads.
func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {

credentials auth.Credentials, t *testing.T,
) {
// Collection of non-exhaustive ListMultipartUploads test cases, valid errors
// and success responses.
testCases := []struct {
@@ -551,7 +547,6 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
testCases[6].uploadIDMarker, testCases[6].delimiter, testCases[6].maxUploads)

nilReq, err := newTestRequest(http.MethodGet, url, 0, nil)

if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
@@ -567,8 +562,8 @@ func TestListBucketsHandler(t *testing.T) {

// testListBucketsHandler - Tests validate listing of buckets.
func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {

credentials auth.Credentials, t *testing.T,
) {
testCases := []struct {
bucketName string
accessKey string
@@ -614,7 +609,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap

// verify response for V2 signed HTTP request.
reqV2, err := newTestSignedRequestV2(http.MethodGet, getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)

if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
}
@@ -629,7 +623,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
// Test for Anonymous/unsigned http request.
// ListBucketsHandler doesn't support bucket policies, setting the policies shouldn't make a difference.
anonReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)

if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request.", instanceType)
}
@@ -645,7 +638,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
// The only aim is to generate an HTTP request in a way that the relevant/registered end point is evoked/called.

nilReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)

if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
@@ -656,12 +648,12 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap

// Wrapper for calling DeleteMultipleObjects HTTP handler tests for both Erasure multiple disks and single node setup.
func TestAPIDeleteMultipleObjectsHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects"})
ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects", "PutBucketPolicy"})
}

func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {

credentials auth.Credentials, t *testing.T,
) {
var err error

contentBytes := []byte("hello")
@@ -680,10 +672,35 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
objectNames = append(objectNames, objectName)
}

for _, name := range []string{"private/object", "public/object"} {
// Uploading the object with retention enabled
_, err = obj.PutObject(GlobalContext, bucketName, name, mustGetPutObjReader(t, bytes.NewReader(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object %s: Error uploading object: <ERROR> %v", name, err)
}
}

// The following block will create a bucket policy with delete object to 'public/*'. This is
// to test a mixed response of a successful & failure while deleting objects in a single request
policyBytes := []byte(fmt.Sprintf(`{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName))
|
||||
rec := httptest.NewRecorder()
|
||||
req, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", bucketName), int64(len(policyBytes)), bytes.NewReader(policyBytes),
|
||||
credentials.AccessKey, credentials.SecretKey, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", err)
|
||||
}
|
||||
apiRouter.ServeHTTP(rec, req)
|
||||
if rec.Code != http.StatusNoContent {
|
||||
t.Errorf("Expected the response status to be `%d`, but instead found `%d`", 200, rec.Code)
|
||||
}
|
||||
|
||||
getObjectToDeleteList := func(objectNames []string) (objectList []ObjectToDelete) {
|
||||
for _, objectName := range objectNames {
|
||||
objectList = append(objectList, ObjectToDelete{
|
||||
ObjectName: objectName,
|
||||
ObjectV: ObjectV{
|
||||
ObjectName: objectName,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -702,9 +719,21 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
|
||||
return deleteErrorList
|
||||
}
|
||||
|
||||
objects := []ObjectToDelete{}
|
||||
objects = append(objects, ObjectToDelete{
|
||||
ObjectV: ObjectV{
|
||||
ObjectName: "private/object",
|
||||
},
|
||||
})
|
||||
objects = append(objects, ObjectToDelete{
|
||||
ObjectV: ObjectV{
|
||||
ObjectName: "public/object",
|
||||
},
|
||||
})
|
||||
requestList := []DeleteObjectsRequest{
|
||||
{Quiet: false, Objects: getObjectToDeleteList(objectNames[:5])},
|
||||
{Quiet: true, Objects: getObjectToDeleteList(objectNames[5:])},
|
||||
{Quiet: false, Objects: objects},
|
||||
}
|
||||
|
||||
// generate multi objects delete response.
|
||||
@@ -741,6 +770,20 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
|
||||
anonResponse := generateMultiDeleteResponse(requestList[0].Quiet, nil, getDeleteErrorList(requestList[0].Objects))
|
||||
encodedAnonResponse := encodeResponse(anonResponse)
|
||||
|
||||
anonRequestWithPartialPublicAccess := encodeResponse(requestList[2])
|
||||
anonResponseWithPartialPublicAccess := generateMultiDeleteResponse(requestList[2].Quiet,
|
||||
[]DeletedObject{
|
||||
{ObjectName: "public/object"},
|
||||
},
|
||||
[]DeleteError{
|
||||
{
|
||||
Code: errorCodes[ErrAccessDenied].Code,
|
||||
Message: errorCodes[ErrAccessDenied].Description,
|
||||
Key: "private/object",
|
||||
},
|
||||
})
|
||||
encodedAnonResponseWithPartialPublicAccess := encodeResponse(anonResponseWithPartialPublicAccess)
|
||||
|
||||
testCases := []struct {
|
||||
bucket string
|
||||
objects []byte
|
||||
@@ -800,6 +843,17 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
|
||||
expectedContent: encodedAnonResponse,
|
||||
expectedRespStatus: http.StatusOK,
|
||||
},
|
||||
// Test case - 6.
|
||||
// Anonymous user has access to some public folder, issue removing with
|
||||
// another private object as well
|
||||
{
|
||||
bucket: bucketName,
|
||||
objects: anonRequestWithPartialPublicAccess,
|
||||
accessKey: "",
|
||||
secretKey: "",
|
||||
expectedContent: encodedAnonResponseWithPartialPublicAccess,
|
||||
expectedRespStatus: http.StatusOK,
|
||||
},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
|
||||
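Editor's note on the hunks above: the test now provisions a bucket policy that grants anonymous s3:DeleteObject only under the public/ prefix, so a single DeleteObjects request yields a mixed result, the public key deleted and the private key rejected with AccessDenied. Below is a minimal, illustrative Go sketch of that per-key split; it uses simplified glob matching rather than MinIO's actual policy evaluator, and the XML fragments only indicate the shape of the S3 response.

// Illustrative sketch (not part of the diff): a prefix-scoped allow rule
// splits one multi-delete request into per-key success/failure results.
package main

import (
	"fmt"
	"path"
)

func main() {
	allowedPattern := "public/*" // resource pattern from the test's bucket policy
	for _, key := range []string{"private/object", "public/object"} {
		// path.Match is a simplification of S3 resource matching.
		if ok, _ := path.Match(allowedPattern, key); ok {
			fmt.Printf("<Deleted><Key>%s</Key></Deleted>\n", key)
		} else {
			fmt.Printf("<Error><Key>%s</Key><Code>AccessDenied</Code></Error>\n", key)
		}
	}
}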
@@ -91,7 +91,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
return
}

if err = globalBucketMetadataSys.Update(bucket, bucketLifecycleConfig, configData); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -168,7 +168,7 @@ func (api objectAPIHandlers) DeleteBucketLifecycleHandler(w http.ResponseWriter,
return
}

if err := globalBucketMetadataSys.Update(bucket, bucketLifecycleConfig, nil); err != nil {
if err := globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

@@ -150,8 +150,8 @@ func TestBucketLifecycle(t *testing.T) {
// Simple tests of bucket lifecycle: PUT, GET, DELETE.
// Tests are related and the order is important.
func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
creds auth.Credentials, t *testing.T) {

creds auth.Credentials, t *testing.T,
) {
// test cases with sample input and expected output.
testCases := []struct {
method string
@@ -266,8 +266,8 @@ func testBucketLifecycle(obj ObjectLayer, instanceType, bucketName string, apiRo
lifecycleResponse []byte
errorResponse APIErrorResponse
shouldPass bool
}) {

},
) {
for i, testCase := range testCases {
// initialize httptest Recorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()

@@ -24,9 +24,9 @@ import (
"fmt"
"io"
"net/http"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"

"github.com/google/uuid"
@@ -75,49 +75,99 @@ func NewLifecycleSys() *LifecycleSys {
}

type expiryTask struct {
objInfo ObjectInfo
versionExpiry bool
objInfo ObjectInfo
versionExpiry bool
restoredObject bool
}

type expiryState struct {
once sync.Once
expiryCh chan expiryTask
once sync.Once
byDaysCh chan expiryTask
byNewerNoncurrentCh chan newerNoncurrentTask
}

func (es *expiryState) queueExpiryTask(oi ObjectInfo, rmVersion bool) {
// PendingTasks returns the number of pending ILM expiry tasks.
func (es *expiryState) PendingTasks() int {
return len(es.byDaysCh) + len(es.byNewerNoncurrentCh)
}

// close closes work channels exactly once.
func (es *expiryState) close() {
es.once.Do(func() {
close(es.byDaysCh)
close(es.byNewerNoncurrentCh)
})
}

// enqueueByDays enqueues object versions expired by days for expiry.
func (es *expiryState) enqueueByDays(oi ObjectInfo, restoredObject bool, rmVersion bool) {
select {
case <-GlobalContext.Done():
es.once.Do(func() {
close(es.expiryCh)
})
case es.expiryCh <- expiryTask{objInfo: oi, versionExpiry: rmVersion}:
es.close()
case es.byDaysCh <- expiryTask{objInfo: oi, versionExpiry: rmVersion, restoredObject: restoredObject}:
default:
}
}

var (
globalExpiryState *expiryState
)
// enqueueByNewerNoncurrent enqueues object versions expired by
// NewerNoncurrentVersions limit for expiry.
func (es *expiryState) enqueueByNewerNoncurrent(bucket string, versions []ObjectToDelete) {
select {
case <-GlobalContext.Done():
es.close()
case es.byNewerNoncurrentCh <- newerNoncurrentTask{bucket: bucket, versions: versions}:
default:
}
}

var globalExpiryState *expiryState

func newExpiryState() *expiryState {
return &expiryState{
expiryCh: make(chan expiryTask, 10000),
byDaysCh: make(chan expiryTask, 10000),
byNewerNoncurrentCh: make(chan newerNoncurrentTask, 10000),
}
}

func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) {
globalExpiryState = newExpiryState()
go func() {
for t := range globalExpiryState.expiryCh {
applyExpiryRule(ctx, objectAPI, t.objInfo, false, t.versionExpiry)
for t := range globalExpiryState.byDaysCh {
if t.objInfo.TransitionedObject.Status != "" {
applyExpiryOnTransitionedObject(ctx, objectAPI, t.objInfo, t.restoredObject)
} else {
applyExpiryOnNonTransitionedObjects(ctx, objectAPI, t.objInfo, t.versionExpiry)
}
}
}()
go func() {
for t := range globalExpiryState.byNewerNoncurrentCh {
deleteObjectVersions(ctx, objectAPI, t.bucket, t.versions)
}
}()
}

// newerNoncurrentTask encapsulates arguments required by worker to expire objects
// by NewerNoncurrentVersions
type newerNoncurrentTask struct {
bucket string
versions []ObjectToDelete
}

type transitionState struct {
once sync.Once
// add future metrics here
once sync.Once
transitionCh chan ObjectInfo

ctx context.Context
objAPI ObjectLayer
mu sync.Mutex
numWorkers int
killCh chan struct{}

activeTasks int32

lastDayMu sync.RWMutex
lastDayStats map[string]*lastDayTierStats
}

func (t *transitionState) queueTransitionTask(oi ObjectInfo) {
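Editor's note: enqueueByDays and enqueueByNewerNoncurrent above share one shape, a buffered channel per task type, a non-blocking send that drops work when the buffer is full, and a close() guarded by sync.Once when the global context ends. A self-contained sketch of that pattern (names are generic, not MinIO's):

// Illustrative sketch (not part of the diff): bounded, non-blocking
// enqueue with a close-exactly-once guard, as used by expiryState.
package main

import (
	"context"
	"fmt"
	"sync"
)

type taskQueue struct {
	once sync.Once
	ch   chan int
}

func (q *taskQueue) close() { q.once.Do(func() { close(q.ch) }) }

// enqueue never blocks: when ctx is done the channel is closed exactly
// once; when the buffer is full the task is silently dropped.
func (q *taskQueue) enqueue(ctx context.Context, task int) {
	select {
	case <-ctx.Done():
		q.close()
	case q.ch <- task:
	default:
	}
}

func main() {
	q := &taskQueue{ch: make(chan int, 2)}
	ctx := context.Background()
	for i := 0; i < 4; i++ {
		q.enqueue(ctx, i) // tasks 2 and 3 are dropped: the buffer holds two
	}
	q.close()
	for t := range q.ch {
		fmt.Println("worker got task", t)
	}
}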
@@ -131,67 +181,135 @@ func (t *transitionState) queueTransitionTask(oi ObjectInfo) {
}
}

var (
globalTransitionState *transitionState
globalTransitionConcurrent = runtime.GOMAXPROCS(0) / 2
)
var globalTransitionState *transitionState

func newTransitionState() *transitionState {
// fix minimum concurrent transition to 1 for single CPU setup
if globalTransitionConcurrent == 0 {
globalTransitionConcurrent = 1
}
func newTransitionState(ctx context.Context, objAPI ObjectLayer) *transitionState {
return &transitionState{
transitionCh: make(chan ObjectInfo, 10000),
ctx: ctx,
objAPI: objAPI,
killCh: make(chan struct{}),
lastDayStats: make(map[string]*lastDayTierStats),
}
}

// addWorker creates a new worker to process tasks
func (t *transitionState) addWorker(ctx context.Context, objectAPI ObjectLayer) {
// Add a new worker.
go func() {
for {
select {
case <-ctx.Done():
return
case oi, ok := <-t.transitionCh:
if !ok {
return
}
// PendingTasks returns the number of ILM transition tasks waiting for a worker
// goroutine.
func (t *transitionState) PendingTasks() int {
return len(globalTransitionState.transitionCh)
}

if err := transitionObject(ctx, objectAPI, oi); err != nil {
logger.LogIf(ctx, fmt.Errorf("Transition failed for %s/%s version:%s with %w", oi.Bucket, oi.Name, oi.VersionID, err))
}
// ActiveTasks returns the number of active (ongoing) ILM transition tasks.
func (t *transitionState) ActiveTasks() int {
return int(atomic.LoadInt32(&t.activeTasks))
}

// worker waits for transition tasks
func (t *transitionState) worker(ctx context.Context, objectAPI ObjectLayer) {
for {
select {
case <-t.killCh:
return
case <-ctx.Done():
return
case oi, ok := <-t.transitionCh:
if !ok {
return
}
atomic.AddInt32(&t.activeTasks, 1)
var tier string
var err error
if tier, err = transitionObject(ctx, objectAPI, oi); err != nil {
logger.LogIf(ctx, fmt.Errorf("Transition failed for %s/%s version:%s with %w", oi.Bucket, oi.Name, oi.VersionID, err))
} else {
ts := tierStats{
TotalSize: uint64(oi.Size),
NumVersions: 1,
}
if oi.IsLatest {
ts.NumObjects = 1
}
t.addLastDayStats(tier, ts)
}
atomic.AddInt32(&t.activeTasks, -1)

}
}()
}
}

func (t *transitionState) addLastDayStats(tier string, ts tierStats) {
t.lastDayMu.Lock()
defer t.lastDayMu.Unlock()

if _, ok := t.lastDayStats[tier]; !ok {
t.lastDayStats[tier] = &lastDayTierStats{}
}
t.lastDayStats[tier].addStats(ts)
}

func (t *transitionState) getDailyAllTierStats() dailyAllTierStats {
t.lastDayMu.RLock()
defer t.lastDayMu.RUnlock()

res := make(dailyAllTierStats, len(t.lastDayStats))
for tier, st := range t.lastDayStats {
res[tier] = st.clone()
}
return res
}

// UpdateWorkers at the end of this function leaves n goroutines waiting for
// transition tasks
func (t *transitionState) UpdateWorkers(n int) {
t.mu.Lock()
defer t.mu.Unlock()

for t.numWorkers < n {
go t.worker(t.ctx, t.objAPI)
t.numWorkers++
}

for t.numWorkers > n {
go func() { t.killCh <- struct{}{} }()
t.numWorkers--
}
}

func initBackgroundTransition(ctx context.Context, objectAPI ObjectLayer) {
if globalTransitionState == nil {
return
}

// Start with globalTransitionConcurrent.
for i := 0; i < globalTransitionConcurrent; i++ {
globalTransitionState.addWorker(ctx, objectAPI)
}
globalTransitionState = newTransitionState(ctx, objectAPI)
n := globalAPIConfig.getTransitionWorkers()
globalTransitionState.UpdateWorkers(n)
}

var errInvalidStorageClass = errors.New("invalid storage class")

func validateTransitionTier(lc *lifecycle.Lifecycle) error {
for _, rule := range lc.Rules {
if rule.Transition.StorageClass == "" {
continue
if rule.Transition.StorageClass != "" {
if valid := globalTierConfigMgr.IsTierValid(rule.Transition.StorageClass); !valid {
return errInvalidStorageClass
}
}
if valid := globalTierConfigMgr.IsTierValid(rule.Transition.StorageClass); !valid {
return errInvalidStorageClass
if rule.NoncurrentVersionTransition.StorageClass != "" {
if valid := globalTierConfigMgr.IsTierValid(rule.NoncurrentVersionTransition.StorageClass); !valid {
return errInvalidStorageClass
}
}
}
return nil
}

// enqueueTransitionImmediate enqueues obj for transition if eligible.
// This is to be called after a successful upload of an object (version).
func enqueueTransitionImmediate(obj ObjectInfo) {
if lc, err := globalLifecycleSys.Get(obj.Bucket); err == nil {
switch lc.ComputeAction(obj.ToLifecycleOpts()) {
case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
globalTransitionState.queueTransitionTask(obj)
}
}
}

// expireAction represents different actions to be performed on expiry of a
// restored/transitioned object
type expireAction int
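Editor's note: UpdateWorkers above resizes the transition worker pool without restarting it, growing by spawning goroutines and shrinking by posting one kill token per surplus worker. A runnable sketch of the same mechanism with generic names:

// Illustrative sketch (not part of the diff): a resizable worker pool
// shrunk via kill tokens, mirroring transitionState.UpdateWorkers.
package main

import (
	"fmt"
	"sync"
	"time"
)

type pool struct {
	mu     sync.Mutex
	n      int
	tasks  chan int
	killCh chan struct{}
}

func (p *pool) worker(id int) {
	for {
		select {
		case <-p.killCh:
			return
		case t, ok := <-p.tasks:
			if !ok {
				return
			}
			fmt.Printf("worker %d ran task %d\n", id, t)
		}
	}
}

// Resize leaves exactly n workers waiting on the task channel.
func (p *pool) Resize(n int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for p.n < n {
		p.n++
		go p.worker(p.n)
	}
	for p.n > n {
		p.n--
		go func() { p.killCh <- struct{}{} }() // async so Resize never blocks
	}
}

func main() {
	p := &pool{tasks: make(chan int, 8), killCh: make(chan struct{})}
	p.Resize(4)
	for i := 0; i < 4; i++ {
		p.tasks <- i
	}
	p.Resize(1) // three idle workers will pick up kill tokens
	time.Sleep(100 * time.Millisecond)
}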
@@ -214,14 +332,15 @@ func expireTransitionedObject(ctx context.Context, objectAPI ObjectLayer, oi *Ob
var opts ObjectOptions
opts.Versioned = globalBucketVersioningSys.Enabled(oi.Bucket)
opts.VersionID = lcOpts.VersionID
opts.Expiration = ExpirationOptions{Expire: true}
switch action {
case expireObj:
// When an object is past expiry or when a transitioned object is being
// deleted, 'mark' the data in the remote tier for delete.
entry := jentry{
ObjName: oi.transitionedObjName,
VersionID: oi.transitionVersionID,
TierName: oi.TransitionTier,
ObjName: oi.TransitionedObject.Name,
VersionID: oi.TransitionedObject.VersionID,
TierName: oi.TransitionedObject.Tier,
}
if err := globalTierJournal.AddEntry(entry); err != nil {
logger.LogIf(ctx, err)
@@ -283,27 +402,29 @@ func genTransitionObjName(bucket string) (string, error) {
// storage specified by the transition ARN, the metadata is left behind on source cluster and original content
// is moved to the transition tier. Note that in the case of encrypted objects, entire encrypted stream is moved
// to the transition tier without decrypting or re-encrypting.
func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo) error {
func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo) (string, error) {
lc, err := globalLifecycleSys.Get(oi.Bucket)
if err != nil {
return err
return "", err
}
tier := lc.TransitionTier(oi.ToLifecycleOpts())
opts := ObjectOptions{
Transition: TransitionOptions{
Status: lifecycle.TransitionPending,
Tier: lc.TransitionTier(oi.ToLifecycleOpts()),
Tier: tier,
ETag: oi.ETag,
},
VersionID: oi.VersionID,
Versioned: globalBucketVersioningSys.Enabled(oi.Bucket),
MTime: oi.ModTime,
VersionID: oi.VersionID,
Versioned: globalBucketVersioningSys.Enabled(oi.Bucket),
VersionSuspended: globalBucketVersioningSys.Suspended(oi.Bucket),
MTime: oi.ModTime,
}
return objectAPI.TransitionObject(ctx, oi.Bucket, oi.Name, opts)
return tier, objectAPI.TransitionObject(ctx, oi.Bucket, oi.Name, opts)
}

// getTransitionedObjectReader returns a reader from the transitioned tier.
func getTransitionedObjectReader(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, oi ObjectInfo, opts ObjectOptions) (gr *GetObjectReader, err error) {
tgtClient, err := globalTierConfigMgr.getDriver(oi.TransitionTier)
tgtClient, err := globalTierConfigMgr.getDriver(oi.TransitionedObject.Tier)
if err != nil {
return nil, fmt.Errorf("transition storage class not configured")
}
@@ -320,7 +441,7 @@ func getTransitionedObjectReader(ctx context.Context, bucket, object string, rs
gopts.length = length
}

reader, err := tgtClient.Get(ctx, oi.transitionedObjName, remoteVersionID(oi.transitionVersionID), gopts)
reader, err := tgtClient.Get(ctx, oi.TransitionedObject.Name, remoteVersionID(oi.TransitionedObject.VersionID), gopts)
if err != nil {
return nil, err
}
@@ -340,9 +461,9 @@ const (

// Encryption specifies encryption setting on restored bucket
type Encryption struct {
EncryptionType sse.SSEAlgorithm `xml:"EncryptionType"`
KMSContext string `xml:"KMSContext,omitempty"`
KMSKeyID string `xml:"KMSKeyId,omitempty"`
EncryptionType sse.Algorithm `xml:"EncryptionType"`
KMSContext string `xml:"KMSContext,omitempty"`
KMSKeyID string `xml:"KMSKeyId,omitempty"`
}

// MetadataEntry denotes name and value.
@@ -381,9 +502,7 @@ func (sp *SelectParameters) IsEmpty() bool {
return sp == nil
}

var (
selectParamsXMLName = "SelectParameters"
)
var selectParamsXMLName = "SelectParameters"

// UnmarshalXML - decodes XML data.
func (sp *SelectParameters) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
@@ -458,7 +577,7 @@ func (r *RestoreObjectRequest) validate(ctx context.Context, objAPI ObjectLayer)
func postRestoreOpts(ctx context.Context, r *http.Request, bucket, object string) (opts ObjectOptions, err error) {
versioned := globalBucketVersioningSys.Enabled(bucket)
versionSuspended := globalBucketVersioningSys.Suspended(bucket)
vid := strings.TrimSpace(r.URL.Query().Get(xhttp.VersionID))
vid := strings.TrimSpace(r.Form.Get(xhttp.VersionID))
if vid != "" && vid != nullVersionID {
_, err := uuid.Parse(vid)
if err != nil {
@@ -495,7 +614,7 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O

if rreq.Type == SelectRestoreRequest {
for _, v := range rreq.OutputLocation.S3.UserMetadata {
if !strings.HasPrefix("x-amz-meta", strings.ToLower(v.Name)) {
if !strings.HasPrefix(strings.ToLower(v.Name), "x-amz-meta") {
meta["x-amz-meta-"+v.Name] = v.Value
continue
}
@@ -544,7 +663,7 @@ func (fi FileInfo) IsRemote() bool {
// IsRemote returns true if this object version's contents are in its remote
// tier.
func (oi ObjectInfo) IsRemote() bool {
if oi.TransitionStatus != lifecycle.TransitionComplete {
if oi.TransitionedObject.Status != lifecycle.TransitionComplete {
return false
}
return !isRestoredObjectOnDisk(oi.UserDefined)
@@ -575,9 +694,9 @@ func completedRestoreObj(expiry time.Time) restoreObjStatus {
// String returns x-amz-restore compatible representation of r.
func (r restoreObjStatus) String() string {
if r.Ongoing() {
return "ongoing-request=true"
return `ongoing-request="true"`
}
return fmt.Sprintf("ongoing-request=false, expiry-date=%s", r.expiry.Format(http.TimeFormat))
return fmt.Sprintf(`ongoing-request="false", expiry-date="%s"`, r.expiry.Format(http.TimeFormat))
}

// Expiry returns expiry of restored object and true if restore-object has completed.
@@ -621,12 +740,11 @@ func parseRestoreObjStatus(restoreHdr string) (restoreObjStatus, error) {
}

switch progressTokens[1] {
case "true":
case "true", `"true"`: // true without double quotes is deprecated in Feb 2022
if len(tokens) == 1 {
return ongoingRestoreObj(), nil
}

case "false":
case "false", `"false"`: // false without double quotes is deprecated in Feb 2022
if len(tokens) != 2 {
return restoreObjStatus{}, errRestoreHDRMalformed
}
@@ -637,8 +755,7 @@ func parseRestoreObjStatus(restoreHdr string) (restoreObjStatus, error) {
if strings.TrimSpace(expiryTokens[0]) != "expiry-date" {
return restoreObjStatus{}, errRestoreHDRMalformed
}

expiry, err := time.Parse(http.TimeFormat, expiryTokens[1])
expiry, err := time.Parse(http.TimeFormat, strings.Trim(expiryTokens[1], `"`))
if err != nil {
return restoreObjStatus{}, errRestoreHDRMalformed
}
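Editor's note: the hunks above move the x-amz-restore header to the quoted form ongoing-request="false", expiry-date="..." while still accepting the deprecated unquoted values. A sketch of a tolerant parser for both forms; this is an illustration, not MinIO's parseRestoreObjStatus:

// Illustrative sketch (not part of the diff): parse x-amz-restore,
// accepting quoted and legacy unquoted values.
package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"
)

// parseRestore splits on the first comma only, because the expiry date
// itself contains commas.
func parseRestore(hdr string) (ongoing bool, expiry time.Time, err error) {
	malformed := fmt.Errorf("malformed x-amz-restore header")
	parts := strings.SplitN(hdr, ",", 2)
	kv := strings.SplitN(strings.TrimSpace(parts[0]), "=", 2)
	if len(kv) != 2 || kv[0] != "ongoing-request" {
		return false, time.Time{}, malformed
	}
	switch strings.Trim(kv[1], `"`) { // tolerate quoted and unquoted
	case "true":
		if len(parts) != 1 {
			return false, time.Time{}, malformed // ongoing restore carries no expiry
		}
		return true, time.Time{}, nil
	case "false":
		if len(parts) != 2 {
			return false, time.Time{}, malformed // completed restore needs an expiry
		}
		ekv := strings.SplitN(strings.TrimSpace(parts[1]), "=", 2)
		if len(ekv) != 2 || ekv[0] != "expiry-date" {
			return false, time.Time{}, malformed
		}
		expiry, err = time.Parse(http.TimeFormat, strings.Trim(ekv[1], `"`))
		return false, expiry, err
	}
	return false, time.Time{}, malformed
}

func main() {
	ongoing, expiry, err := parseRestore(`ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"`)
	fmt.Println(ongoing, expiry, err) // false 2012-12-21 00:00:00 +0000 UTC <nil>
}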
@@ -662,17 +779,16 @@ func isRestoredObjectOnDisk(meta map[string]string) (onDisk bool) {
// ToLifecycleOpts returns lifecycle.ObjectOpts value for oi.
func (oi ObjectInfo) ToLifecycleOpts() lifecycle.ObjectOpts {
return lifecycle.ObjectOpts{
Name: oi.Name,
UserTags: oi.UserTags,
VersionID: oi.VersionID,
ModTime: oi.ModTime,
IsLatest: oi.IsLatest,
NumVersions: oi.NumVersions,
DeleteMarker: oi.DeleteMarker,
SuccessorModTime: oi.SuccessorModTime,
RestoreOngoing: oi.RestoreOngoing,
RestoreExpires: oi.RestoreExpires,
TransitionStatus: oi.TransitionStatus,
RemoteTiersImmediately: globalDebugRemoteTiersImmediately,
Name: oi.Name,
UserTags: oi.UserTags,
VersionID: oi.VersionID,
ModTime: oi.ModTime,
IsLatest: oi.IsLatest,
NumVersions: oi.NumVersions,
DeleteMarker: oi.DeleteMarker,
SuccessorModTime: oi.SuccessorModTime,
RestoreOngoing: oi.RestoreOngoing,
RestoreExpires: oi.RestoreExpires,
TransitionStatus: oi.TransitionedObject.Status,
}
}

@@ -36,7 +36,7 @@ func TestParseRestoreObjStatus(t *testing.T) {
}{
{
// valid: represents a restored object, 'pending' expiry.
restoreHdr: "ongoing-request=false, expiry-date=Fri, 21 Dec 2012 00:00:00 GMT",
restoreHdr: `ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"`,
expectedStatus: restoreObjStatus{
ongoing: false,
expiry: time.Date(2012, 12, 21, 0, 0, 0, 0, time.UTC),
@@ -45,7 +45,7 @@ func TestParseRestoreObjStatus(t *testing.T) {
},
{
// valid: represents an ongoing restore object request.
restoreHdr: "ongoing-request=true",
restoreHdr: `ongoing-request="true"`,
expectedStatus: restoreObjStatus{
ongoing: true,
},
@@ -53,13 +53,13 @@ func TestParseRestoreObjStatus(t *testing.T) {
},
{
// invalid; ongoing restore object request can't have expiry set on it.
restoreHdr: "ongoing-request=true, expiry-date=Fri, 21 Dec 2012 00:00:00 GMT",
restoreHdr: `ongoing-request="true", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"`,
expectedStatus: restoreObjStatus{},
expectedErr: errRestoreHDRMalformed,
},
{
// invalid; completed restore object request must have expiry set on it.
restoreHdr: "ongoing-request=false",
restoreHdr: `ongoing-request="false"`,
expectedStatus: restoreObjStatus{},
expectedErr: errRestoreHDRMalformed,
},

@@ -24,30 +24,12 @@ import (
"strings"

"github.com/gorilla/mux"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"

"github.com/minio/minio/internal/sync/errgroup"
"github.com/minio/pkg/bucket/policy"
)

func concurrentDecryptETag(ctx context.Context, objects []ObjectInfo) {
g := errgroup.WithNErrs(len(objects)).WithConcurrency(500)
_, cancel := g.WithCancelOnError(ctx)
defer cancel()
for index := range objects {
index := index
g.Go(func() error {
size, err := objects[index].GetActualSize()
if err == nil {
objects[index].Size = size
}
objects[index].ETag = objects[index].GetActualETag(nil)
return nil
}, index)
}
g.WaitErr()
}

// Validate all the ListObjects query arguments, returns an APIErrorCode
// if one of the args does not meet the required conditions.
// Special conditions required by MinIO server are as below
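Editor's note: concurrentDecryptETag, deleted above, fanned out per-object work through MinIO's internal errgroup with a concurrency cap. The same bounded fan-out can be written with the standard golang.org/x/sync/errgroup and SetLimit; a sketch with stand-in per-object work:

// Illustrative sketch (not part of the diff): bounded-concurrency fan-out
// over a slice, replacing the removed internal-errgroup pattern.
package main

import (
	"fmt"
	"strings"

	"golang.org/x/sync/errgroup"
)

func main() {
	objects := []string{"a.txt", "b.txt", "c.txt"}
	var g errgroup.Group
	g.SetLimit(2) // at most two goroutines in flight, cf. WithConcurrency(500) above
	for i := range objects {
		i := i // capture the loop variable (pre-Go 1.22 semantics)
		g.Go(func() error {
			objects[i] = strings.ToUpper(objects[i]) // stand-in for per-object ETag/size work
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
	fmt.Println(objects)
}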
@@ -61,8 +43,8 @@ func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int
}

if encodingType != "" {
// Only url encoding type is supported
if strings.ToLower(encodingType) != "url" {
// AWS S3 spec only supports 'url' encoding type
if !strings.EqualFold(encodingType, "url") {
return ErrInvalidEncodingMethod
}
}
@@ -92,7 +74,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
return
}

urlValues := r.URL.Query()
urlValues := r.Form

// Extract all the listBucketVersions query params to their native values.
prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode := getListBucketObjectVersionsArgs(urlValues)
@@ -118,7 +100,10 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
return
}

concurrentDecryptETag(ctx, listObjectVersionsInfo.Objects)
if err = DecryptETags(ctx, GlobalKMS, listObjectVersionsInfo.Objects, kms.BatchSize()); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo)

@@ -128,7 +113,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r

// ListObjectsV2MHandler - GET Bucket (List Objects) Version 2 with metadata.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
// This implementation of the GET operation returns some or all (up to 1000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
@@ -153,7 +138,7 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
return
}

urlValues := r.URL.Query()
urlValues := r.Form

// Extract all the listObjectsV2 query params to their native values.
prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, errCode := getListObjectsV2Args(urlValues)
@@ -180,7 +165,10 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
return
}

concurrentDecryptETag(ctx, listObjectsV2Info.Objects)
if err = DecryptETags(ctx, GlobalKMS, listObjectsV2Info.Objects, kms.BatchSize()); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

// The next continuation token has id@node_index format to optimize paginated listing
nextContinuationToken := listObjectsV2Info.NextContinuationToken
@@ -195,7 +183,7 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt

// ListObjectsV2Handler - GET Bucket (List Objects) Version 2.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
// This implementation of the GET operation returns some or all (up to 1000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
@@ -220,7 +208,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
return
}

urlValues := r.URL.Query()
urlValues := r.Form

// Extract all the listObjectsV2 query params to their native values.
prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, errCode := getListObjectsV2Args(urlValues)
@@ -255,7 +243,10 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
return
}

concurrentDecryptETag(ctx, listObjectsV2Info.Objects)
if err = DecryptETags(ctx, GlobalKMS, listObjectsV2Info.Objects, kms.BatchSize()); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
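Editor's note: the listing handlers now call DecryptETags(ctx, GlobalKMS, objects, kms.BatchSize()) so encrypted ETags are decrypted in bulk rather than one goroutine per object. A sketch of the windowed-batch idea; decryptBatch and batchSize here are hypothetical stand-ins, not MinIO APIs:

// Illustrative sketch (not part of the diff): walk a slice in fixed-size
// windows so each KMS round-trip handles many items at once.
package main

import "fmt"

func decryptBatch(batch []string) error {
	// one bulk KMS call would go here
	fmt.Println("decrypting batch:", batch)
	return nil
}

func main() {
	objects := []string{"o1", "o2", "o3", "o4", "o5"}
	const batchSize = 2
	for start := 0; start < len(objects); start += batchSize {
		end := start + batchSize
		if end > len(objects) {
			end = len(objects)
		}
		if err := decryptBatch(objects[start:end]); err != nil {
			fmt.Println("error:", err)
			return
		}
	}
}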
@@ -305,7 +296,7 @@ func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http

// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
// This implementation of the GET operation returns some or all (up to 1000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
@@ -329,7 +320,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
}

// Extract all the listObjectsV1 query params to their native values.
prefix, marker, delimiter, maxKeys, encodingType, s3Error := getListObjectsV1Args(r.URL.Query())
prefix, marker, delimiter, maxKeys, encodingType, s3Error := getListObjectsV1Args(r.Form)
if s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
@@ -352,7 +343,10 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
return
}

concurrentDecryptETag(ctx, listObjectsInfo.Objects)
if err = DecryptETags(ctx, GlobalKMS, listObjectsInfo.Objects, kms.BatchSize()); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)

@@ -74,60 +74,22 @@ func (sys *BucketMetadataSys) Set(bucket string, meta BucketMetadata) {

// Update updates bucket metadata for the specified config file.
// The configData data should not be modified after being sent here.
func (sys *BucketMetadataSys) Update(bucket string, configFile string, configData []byte) error {
func (sys *BucketMetadataSys) Update(ctx context.Context, bucket string, configFile string, configData []byte) error {
objAPI := newObjectLayerFn()
if objAPI == nil {
return errServerNotInitialized
}

if globalIsGateway {
// This code is needed only for gateway implementations.
switch configFile {
case bucketSSEConfig:
if globalGatewayName == NASBackendGateway {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err
}
meta.EncryptionConfigXML = configData
return meta.Save(GlobalContext, objAPI)
}
case bucketLifecycleConfig:
if globalGatewayName == NASBackendGateway {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err
}
meta.LifecycleConfigXML = configData
return meta.Save(GlobalContext, objAPI)
}
case bucketTaggingConfig:
if globalGatewayName == NASBackendGateway {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err
}
meta.TaggingConfigXML = configData
return meta.Save(GlobalContext, objAPI)
}
case bucketNotificationConfig:
if globalGatewayName == NASBackendGateway {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err
}
meta.NotificationConfigXML = configData
return meta.Save(GlobalContext, objAPI)
}
case bucketPolicyConfig:
if globalIsGateway && globalGatewayName != NASBackendGateway {
if configFile == bucketPolicyConfig {
if configData == nil {
return objAPI.DeleteBucketPolicy(GlobalContext, bucket)
return objAPI.DeleteBucketPolicy(ctx, bucket)
}
config, err := policy.ParseConfig(bytes.NewReader(configData), bucket)
if err != nil {
return err
}
return objAPI.SetBucketPolicy(GlobalContext, bucket, config)
return objAPI.SetBucketPolicy(ctx, bucket, config)
}
return NotImplemented{}
}
@@ -136,9 +98,14 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
return errInvalidArgument
}

meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
meta, err := loadBucketMetadata(ctx, objAPI, bucket)
if err != nil {
return err
if !globalIsErasure && !globalIsDistErasure && errors.Is(err, errVolumeNotFound) {
// Only single drive mode needs this fallback.
meta = newBucketMetadata(bucket)
} else {
return err
}
}

switch configFile {
@@ -181,12 +148,12 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
return fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)
}

if err := meta.Save(GlobalContext, objAPI); err != nil {
if err := meta.Save(ctx, objAPI); err != nil {
return err
}

sys.Set(bucket, meta)
globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)
globalNotificationSys.LoadBucketMetadata(bgContext(ctx), bucket) // Do not use caller context here

return nil
}
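Editor's note: the Update hunk above passes bgContext(ctx) to LoadBucketMetadata with the comment "Do not use caller context here"; the intent appears to be to keep request-scoped values while detaching from the request's cancelation, so the notification reload is not aborted when the HTTP request ends. A sketch of such a detached context; the semantics of MinIO's bgContext are an assumption here, and Go 1.21+ offers context.WithoutCancel for the same purpose:

// Illustrative sketch (not part of the diff): a context that inherits
// values from its parent but is never canceled with it.
package main

import (
	"context"
	"fmt"
	"time"
)

type detachedCtx struct{ parent context.Context }

func (d detachedCtx) Deadline() (time.Time, bool)       { return time.Time{}, false }
func (d detachedCtx) Done() <-chan struct{}             { return nil }
func (d detachedCtx) Err() error                        { return nil }
func (d detachedCtx) Value(key interface{}) interface{} { return d.parent.Value(key) }

func main() {
	type key string
	parent, cancel := context.WithCancel(context.WithValue(context.Background(), key("req"), "abc123"))
	bg := detachedCtx{parent: parent}
	cancel()                                    // the request finished
	fmt.Println(bg.Err(), bg.Value(key("req"))) // <nil> abc123
}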
@@ -220,7 +187,7 @@ func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) {
// GetVersioningConfig returns configured versioning config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Versioning, error) {
meta, err := sys.GetConfig(bucket)
meta, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
return nil, err
}
@@ -230,7 +197,7 @@ func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Ve
// GetTaggingConfig returns configured tagging config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, error) {
meta, err := sys.GetConfig(bucket)
meta, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, BucketTaggingNotFound{Bucket: bucket}
@@ -246,7 +213,7 @@ func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, error
// GetObjectLockConfig returns configured object lock config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetObjectLockConfig(bucket string) (*objectlock.Config, error) {
meta, err := sys.GetConfig(bucket)
meta, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, BucketObjectLockConfigNotFound{Bucket: bucket}
@@ -278,7 +245,7 @@ func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Life
return meta.lifecycleConfig, nil
}

meta, err := sys.GetConfig(bucket)
meta, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, BucketLifecycleNotFound{Bucket: bucket}
@@ -307,7 +274,7 @@ func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Confi
return meta.notificationConfig, nil
}

meta, err := sys.GetConfig(bucket)
meta, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
return nil, err
}
@@ -317,7 +284,7 @@ func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Confi
// GetSSEConfig returns configured SSE config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEConfig, error) {
meta, err := sys.GetConfig(bucket)
meta, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, BucketSSEConfigNotFound{Bucket: bucket}
@@ -341,7 +308,7 @@ func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, er
return objAPI.GetBucketPolicy(GlobalContext, bucket)
}

meta, err := sys.GetConfig(bucket)
meta, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, BucketPolicyNotFound{Bucket: bucket}
@@ -356,8 +323,8 @@ func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, er

// GetQuotaConfig returns configured bucket quota
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetQuotaConfig(bucket string) (*madmin.BucketQuota, error) {
meta, err := sys.GetConfig(bucket)
func (sys *BucketMetadataSys) GetQuotaConfig(ctx context.Context, bucket string) (*madmin.BucketQuota, error) {
meta, err := sys.GetConfig(ctx, bucket)
if err != nil {
return nil, err
}
@@ -367,7 +334,7 @@ func (sys *BucketMetadataSys) GetQuotaConfig(bucket string) (*madmin.BucketQuota
// GetReplicationConfig returns configured bucket replication config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket string) (*replication.Config, error) {
meta, err := sys.GetConfig(bucket)
meta, err := sys.GetConfig(ctx, bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, BucketReplicationConfigNotFound{Bucket: bucket}
@@ -384,7 +351,7 @@ func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket s
// GetBucketTargetsConfig returns configured bucket targets for this bucket
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetBucketTargetsConfig(bucket string) (*madmin.BucketTargets, error) {
meta, err := sys.GetConfig(bucket)
meta, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
return nil, err
}
@@ -410,7 +377,7 @@ func (sys *BucketMetadataSys) GetBucketTarget(bucket string, arn string) (madmin

// GetConfig returns a specific configuration from the bucket metadata.
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetConfig(bucket string) (BucketMetadata, error) {
func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (BucketMetadata, error) {
objAPI := newObjectLayerFn()
if objAPI == nil {
return newBucketMetadata(bucket), errServerNotInitialized
@@ -430,7 +397,7 @@ func (sys *BucketMetadataSys) GetConfig(bucket string) (BucketMetadata, error) {
if ok {
return meta, nil
}
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
meta, err := loadBucketMetadata(ctx, objAPI, bucket)
if err != nil {
return meta, err
}
@@ -447,9 +414,9 @@ func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, ob
return errServerNotInitialized
}

// In gateway mode, we don't need to load the policies
// from the backend.
if globalIsGateway {
// In gateway mode, we don't need to load bucket metadata except
// NAS gateway backend.
if globalIsGateway && !objAPI.IsNotificationSupported() {
return nil
}

@@ -467,14 +434,24 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
_, _ = objAPI.HealBucket(ctx, buckets[index].Name, madmin.HealOpts{
// Ensure heal opts for bucket metadata be deep healed all the time.
ScanMode: madmin.HealDeepScan,
Recreate: true,
})
meta, err := loadBucketMetadata(ctx, objAPI, buckets[index].Name)
if err != nil {
return err
if !globalIsErasure && !globalIsDistErasure && errors.Is(err, errVolumeNotFound) {
meta = newBucketMetadata(buckets[index].Name)
} else {
return err
}
}
sys.Lock()
sys.metadataMap[buckets[index].Name] = meta
sys.Unlock()

globalNotificationSys.set(buckets[index], meta) // set notification targets

globalBucketTargetSys.set(buckets[index], meta) // set remote replication targets

return nil
}, index)
}

@@ -120,7 +120,7 @@ func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string)
logger.LogIf(ctx, errors.New("bucket name cannot be empty"))
return errors.New("bucket name cannot be empty")
}
configFile := path.Join(bucketConfigPrefix, name, bucketMetadataFile)
configFile := path.Join(bucketMetaPrefix, name, bucketMetadataFile)
data, err := readConfig(ctx, api, configFile)
if err != nil {
return err
@@ -277,7 +277,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
}

for _, legacyFile := range legacyConfigs {
configFile := path.Join(bucketConfigPrefix, b.Name, legacyFile)
configFile := path.Join(bucketMetaPrefix, b.Name, legacyFile)

configData, err := readConfig(ctx, objectAPI, configFile)
if err != nil {
@@ -338,7 +338,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
}

for legacyFile := range configs {
configFile := path.Join(bucketConfigPrefix, b.Name, legacyFile)
configFile := path.Join(bucketMetaPrefix, b.Name, legacyFile)
if err := deleteConfig(ctx, objectAPI, configFile); err != nil && !errors.Is(err, errConfigNotFound) {
logger.LogIf(ctx, err)
}
@@ -365,7 +365,7 @@ func (b *BucketMetadata) Save(ctx context.Context, api ObjectLayer) error {
return err
}

configFile := path.Join(bucketConfigPrefix, b.Name, bucketMetadataFile)
configFile := path.Join(bucketMetaPrefix, b.Name, bucketMetadataFile)
return saveConfig(ctx, api, configFile, data)
}

@@ -375,9 +375,10 @@ func deleteBucketMetadata(ctx context.Context, obj objectDeleter, bucket string)
metadataFiles := []string{
dataUsageCacheName,
bucketMetadataFile,
path.Join(replicationDir, resyncFileName),
}
for _, metaFile := range metadataFiles {
configFile := path.Join(bucketConfigPrefix, bucket, metaFile)
configFile := path.Join(bucketMetaPrefix, bucket, metaFile)
if err := deleteConfig(ctx, obj, configFile); err != nil && err != errConfigNotFound {
return err
}

@@ -30,7 +30,6 @@ import (
)

const (
bucketConfigPrefix = "buckets"
bucketNotificationConfig = "notification.xml"
)

@@ -72,8 +71,8 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
config.SetRegion(globalServerRegion)
if err = config.Validate(globalServerRegion, globalNotificationSys.targetList); err != nil {
config.SetRegion(globalSite.Region)
if err = config.Validate(globalSite.Region, globalNotificationSys.targetList); err != nil {
arnErr, ok := err.(*event.ErrARNNotFound)
if ok {
for i, queue := range config.QueueList {
@@ -145,7 +144,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
return
}

config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalServerRegion, globalNotificationSys.targetList)
config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalSite.Region, globalNotificationSys.targetList)
if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
if event.IsEventError(err) {
@@ -161,7 +160,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
return
}

if err = globalBucketMetadataSys.Update(bucketName, bucketNotificationConfig, configData); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucketName, bucketNotificationConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

@@ -58,6 +58,10 @@ func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention,
// enforceRetentionForDeletion checks if it is appropriate to remove an
// object according to locking configuration when the request comes from lifecycle or bucket quota enforcement.
func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locked bool) {
if objInfo.DeleteMarker {
return false
}

lhold := objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
if lhold.Status.Valid() && lhold.Status == objectlock.LegalHoldOn {
return true
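Editor's note: enforceRetentionForDeletion above gains an early return for delete markers and then checks legal hold before retention mode. A compact sketch of that check order, using simplified stand-in types rather than the objectlock package:

// Illustrative sketch (not part of the diff): delete markers pass, a legal
// hold blocks, and retention blocks until the retain-until date.
package main

import (
	"fmt"
	"time"
)

type objectMeta struct {
	deleteMarker    bool
	legalHoldOn     bool
	retentionMode   string // "COMPLIANCE", "GOVERNANCE" or ""
	retainUntilDate time.Time
}

func lockedForDeletion(o objectMeta, now time.Time) bool {
	if o.deleteMarker {
		return false // nothing to protect
	}
	if o.legalHoldOn {
		return true
	}
	switch o.retentionMode {
	case "COMPLIANCE", "GOVERNANCE":
		return o.retainUntilDate.After(now)
	}
	return false
}

func main() {
	now := time.Now()
	held := objectMeta{legalHoldOn: true}
	expired := objectMeta{retentionMode: "COMPLIANCE", retainUntilDate: now.Add(-time.Hour)}
	fmt.Println(lockedForDeletion(held, now), lockedForDeletion(expired, now)) // true false
}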
@@ -93,7 +97,7 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke
if gerr != nil { // error from GetObjectInfo
switch gerr.(type) {
case MethodNotAllowed: // This happens usually for a delete marker
if oi.DeleteMarker {
if oi.DeleteMarker || !oi.VersionPurgeStatus.Empty() {
// Delete marker should be present and valid.
return ErrNone
}
@@ -175,68 +179,76 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke
// For objects in "Governance" mode, overwrite is allowed if a) object retention date is past OR
// governance bypass headers are set and user has governance bypass permissions.
// Objects in compliance mode can be overwritten only if retention date is being extended. No mode change is permitted.
func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, objRetention *objectlock.ObjectRetention, cred auth.Credentials, owner bool, claims map[string]interface{}) (ObjectInfo, APIErrorCode) {
func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, oi ObjectInfo, objRetention *objectlock.ObjectRetention, cred auth.Credentials, owner bool) error {
byPassSet := objectlock.IsObjectLockGovernanceBypassSet(r.Header)
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
return ObjectInfo{}, toAPIErrorCode(ctx, err)
}

oi, err := getObjectInfoFn(ctx, bucket, object, opts)
if err != nil {
return oi, toAPIErrorCode(ctx, err)
}

t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return oi, ErrObjectLocked
return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID}
}

// Pass in relative days from current time, to additionally to verify "object-lock-remaining-retention-days" policy if any.
// Pass in relative days from current time, to additionally
// to verify "object-lock-remaining-retention-days" policy if any.
days := int(math.Ceil(math.Abs(objRetention.RetainUntilDate.Sub(t).Hours()) / 24))

ret := objectlock.GetObjectRetentionMeta(oi.UserDefined)
if ret.Mode.Valid() {
// Retention has expired, you may change whatever you like.
if ret.RetainUntilDate.Before(t) {
perm := isPutRetentionAllowed(bucket, object,
apiErr := isPutRetentionAllowed(oi.Bucket, oi.Name,
days, objRetention.RetainUntilDate.Time,
objRetention.Mode, byPassSet, r, cred,
owner, claims)
return oi, perm
owner)
switch apiErr {
case ErrAccessDenied:
return errAuthentication
}
return nil
}

switch ret.Mode {
case objectlock.RetGovernance:
govPerm := isPutRetentionAllowed(bucket, object, days,
govPerm := isPutRetentionAllowed(oi.Bucket, oi.Name, days,
objRetention.RetainUntilDate.Time, objRetention.Mode,
byPassSet, r, cred, owner, claims)
byPassSet, r, cred, owner)
// Governance mode retention period cannot be shortened, if x-amz-bypass-governance is not set.
if !byPassSet {
if objRetention.Mode != objectlock.RetGovernance || objRetention.RetainUntilDate.Before((ret.RetainUntilDate.Time)) {
return oi, ErrObjectLocked
return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID}
}
}
return oi, govPerm
switch govPerm {
case ErrAccessDenied:
return errAuthentication
}
return nil
case objectlock.RetCompliance:
// Compliance retention mode cannot be changed or shortened.
// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes
if objRetention.Mode != objectlock.RetCompliance || objRetention.RetainUntilDate.Before((ret.RetainUntilDate.Time)) {
return oi, ErrObjectLocked
return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID}
}
compliancePerm := isPutRetentionAllowed(bucket, object,
apiErr := isPutRetentionAllowed(oi.Bucket, oi.Name,
days, objRetention.RetainUntilDate.Time, objRetention.Mode,
false, r, cred, owner, claims)
return oi, compliancePerm
false, r, cred, owner)
switch apiErr {
case ErrAccessDenied:
return errAuthentication
}
return nil
}
return oi, ErrNone
return nil
} // No pre-existing retention metadata present.

perm := isPutRetentionAllowed(bucket, object,
apiErr := isPutRetentionAllowed(oi.Bucket, oi.Name,
days, objRetention.RetainUntilDate.Time,
objRetention.Mode, byPassSet, r, cred, owner, claims)
return oi, perm
objRetention.Mode, byPassSet, r, cred, owner)
switch apiErr {
case ErrAccessDenied:
return errAuthentication
}
return nil
}

// checkPutObjectLockAllowed enforces object retention policy and legal hold policy
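Editor's note: the rewritten enforceRetentionBypassForPut above encodes two rules, compliance retention may only be extended in the same mode, and governance behaves likewise unless the governance-bypass header is set and permitted; expired retention may be replaced freely. A condensed sketch of that decision:

// Illustrative sketch (not part of the diff): may a new retention
// (mode, until) replace the existing one?
package main

import (
	"fmt"
	"time"
)

func mayChangeRetention(curMode, newMode string, curUntil, newUntil, now time.Time, bypass bool) bool {
	if curUntil.Before(now) {
		return true // existing retention already expired
	}
	switch curMode {
	case "GOVERNANCE":
		if bypass {
			return true // x-amz-bypass-governance-retention, with permission
		}
		return newMode == "GOVERNANCE" && !newUntil.Before(curUntil)
	case "COMPLIANCE":
		return newMode == "COMPLIANCE" && !newUntil.Before(curUntil)
	}
	return true // no pre-existing retention mode
}

func main() {
	now := time.Now()
	cur := now.Add(48 * time.Hour)
	fmt.Println(mayChangeRetention("COMPLIANCE", "COMPLIANCE", cur, cur.Add(72*time.Hour), now, false)) // true: extended
	fmt.Println(mayChangeRetention("COMPLIANCE", "GOVERNANCE", cur, cur.Add(72*time.Hour), now, false)) // false: mode change
}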
@@ -18,12 +18,15 @@
package cmd

import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"net/http"

humanize "github.com/dustin/go-humanize"
"github.com/gorilla/mux"
"github.com/minio/madmin-go"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/bucket/policy"
)
@@ -76,7 +79,13 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
return
}

bucketPolicy, err := policy.ParseConfig(io.LimitReader(r.Body, r.ContentLength), bucket)
bucketPolicyBytes, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

bucketPolicy, err := policy.ParseConfig(bytes.NewReader(bucketPolicyBytes), bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -94,7 +103,17 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
return
}

if err = globalBucketMetadataSys.Update(bucket, bucketPolicyConfig, configData); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

// Call site replication hook.
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypePolicy,
Bucket: bucket,
Policy: bucketPolicyBytes,
}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -129,7 +148,16 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
|
||||
return
|
||||
}
|
||||
|
||||
if err := globalBucketMetadataSys.Update(bucket, bucketPolicyConfig, nil); err != nil {
|
||||
if err := globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, nil); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Call site replication hook.
|
||||
if err := globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
|
||||
Type: madmin.SRBucketMetaTypePolicy,
|
||||
Bucket: bucket,
|
||||
}); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"net/http/httptest"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/minio/minio/internal/auth"
|
||||
@@ -36,60 +37,118 @@ import (
|
||||
func getAnonReadOnlyBucketPolicy(bucketName string) *policy.Policy {
|
||||
return &policy.Policy{
|
||||
Version: policy.DefaultVersion,
|
||||
Statements: []policy.Statement{policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(policy.GetBucketLocationAction, policy.ListBucketAction),
|
||||
policy.NewResourceSet(policy.NewResource(bucketName, "")),
|
||||
condition.NewFunctions(),
|
||||
)},
|
||||
Statements: []policy.Statement{
|
||||
policy.NewStatement(
|
||||
"",
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(policy.GetBucketLocationAction, policy.ListBucketAction),
|
||||
policy.NewResourceSet(policy.NewResource(bucketName, "")),
|
||||
condition.NewFunctions(),
|
||||
),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getAnonWriteOnlyBucketPolicy(bucketName string) *policy.Policy {
|
||||
return &policy.Policy{
|
||||
Version: policy.DefaultVersion,
|
||||
Statements: []policy.Statement{policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(
|
||||
policy.GetBucketLocationAction,
|
||||
policy.ListBucketMultipartUploadsAction,
|
||||
Statements: []policy.Statement{
|
||||
policy.NewStatement(
|
||||
"",
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(
|
||||
policy.GetBucketLocationAction,
|
||||
policy.ListBucketMultipartUploadsAction,
|
||||
),
|
||||
policy.NewResourceSet(policy.NewResource(bucketName, "")),
|
||||
condition.NewFunctions(),
|
||||
),
|
||||
policy.NewResourceSet(policy.NewResource(bucketName, "")),
|
||||
condition.NewFunctions(),
|
||||
)},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getAnonReadOnlyObjectPolicy(bucketName, prefix string) *policy.Policy {
|
||||
return &policy.Policy{
|
||||
Version: policy.DefaultVersion,
|
||||
Statements: []policy.Statement{policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(policy.GetObjectAction),
|
||||
policy.NewResourceSet(policy.NewResource(bucketName, prefix)),
|
||||
condition.NewFunctions(),
|
||||
)},
|
||||
Statements: []policy.Statement{
|
||||
policy.NewStatement(
|
||||
"",
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(policy.GetObjectAction),
|
||||
policy.NewResourceSet(policy.NewResource(bucketName, prefix)),
|
||||
condition.NewFunctions(),
|
||||
),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getAnonWriteOnlyObjectPolicy(bucketName, prefix string) *policy.Policy {
|
||||
return &policy.Policy{
|
||||
Version: policy.DefaultVersion,
|
||||
Statements: []policy.Statement{policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(
|
||||
policy.AbortMultipartUploadAction,
|
||||
policy.DeleteObjectAction,
|
||||
policy.ListMultipartUploadPartsAction,
|
||||
policy.PutObjectAction,
|
||||
Statements: []policy.Statement{
|
||||
policy.NewStatement(
|
||||
"",
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(
|
||||
policy.AbortMultipartUploadAction,
|
||||
policy.DeleteObjectAction,
|
||||
policy.ListMultipartUploadPartsAction,
|
||||
policy.PutObjectAction,
|
||||
),
|
||||
policy.NewResourceSet(policy.NewResource(bucketName, prefix)),
|
||||
condition.NewFunctions(),
|
||||
),
|
||||
policy.NewResourceSet(policy.NewResource(bucketName, prefix)),
|
||||
condition.NewFunctions(),
|
||||
)},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Wrapper for calling Create Bucket and ensure we get one and only one success.
|
||||
func TestCreateBucket(t *testing.T) {
|
||||
ExecObjectLayerAPITest(t, testCreateBucket, []string{"MakeBucketWithLocation"})
|
||||
}
|
||||
|
||||
// testCreateBucket - Test for calling Create Bucket and ensure we get one and only one success.
|
||||
func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
|
||||
credentials auth.Credentials, t *testing.T) {
|
||||
bucketName1 := fmt.Sprintf("%s-1", bucketName)
|
||||
|
||||
const n = 100
|
||||
start := make(chan struct{})
|
||||
var ok, errs int
|
||||
var wg sync.WaitGroup
|
||||
var mu sync.Mutex
|
||||
wg.Add(n)
|
||||
for i := 0; i < n; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
// Sync start.
|
||||
<-start
|
||||
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, BucketOptions{}); err != nil {
|
||||
if _, ok := err.(BucketExists); !ok {
|
||||
t.Logf("unexpected error: %T: %v", err, err)
|
||||
return
|
||||
}
|
||||
mu.Lock()
|
||||
errs++
|
||||
mu.Unlock()
|
||||
return
|
||||
}
|
||||
mu.Lock()
|
||||
ok++
|
||||
mu.Unlock()
|
||||
}()
|
||||
}
|
||||
close(start)
|
||||
wg.Wait()
|
||||
if ok != 1 {
|
||||
t.Fatalf("want 1 ok, got %d", ok)
|
||||
}
|
||||
if errs != n-1 {
|
||||
t.Fatalf("want %d errors, got %d", n-1, errs)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -100,8 +159,8 @@ func TestPutBucketPolicyHandler(t *testing.T) {
|
||||
|
||||
// testPutBucketPolicyHandler - Test for Bucket policy end point.
|
||||
func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
|
||||
credentials auth.Credentials, t *testing.T) {
|
||||
|
||||
credentials auth.Credentials, t *testing.T,
|
||||
) {
|
||||
bucketName1 := fmt.Sprintf("%s-1", bucketName)
|
||||
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, BucketOptions{}); err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -286,7 +345,6 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
|
||||
// create unsigned HTTP request for PutBucketPolicyHandler.
|
||||
anonReq, err := newTestRequest(http.MethodPut, getPutPolicyURL("", bucketName),
|
||||
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)))
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
|
||||
instanceType, bucketName, err)
|
||||
@@ -305,14 +363,12 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
|
||||
|
||||
nilReq, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", nilBucket),
|
||||
0, nil, "", "", nil)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
|
||||
}
|
||||
// execute the object layer set to `nil` test.
|
||||
// `ExecObjectLayerAPINilTest` manages the operation.
|
||||
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
|
||||
|
||||
}
|
||||
|
||||
// Wrapper for calling Get Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
|
||||
@@ -418,7 +474,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
|
||||
// construct HTTP request for PUT bucket policy endpoint.
|
||||
reqV4, err := newTestSignedRequestV4(http.MethodGet, getGetPolicyURL("", testCase.bucketName),
|
||||
0, nil, testCase.accessKey, testCase.secretKey, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
|
||||
}
|
||||
@@ -493,7 +548,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
|
||||
// Bucket policy related functions doesn't support anonymous requests, setting policies shouldn't make a difference.
|
||||
// create unsigned HTTP request for PutBucketPolicyHandler.
|
||||
anonReq, err := newTestRequest(http.MethodGet, getPutPolicyURL("", bucketName), 0, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
|
||||
instanceType, bucketName, err)
|
||||
@@ -512,7 +566,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
|
||||
|
||||
nilReq, err := newTestSignedRequestV4(http.MethodGet, getGetPolicyURL("", nilBucket),
|
||||
0, nil, "", "", nil)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
|
||||
}
|
||||
@@ -528,8 +581,8 @@ func TestDeleteBucketPolicyHandler(t *testing.T) {
|
||||
|
||||
// testDeleteBucketPolicyHandler - Test for Delete bucket policy end point.
|
||||
func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
|
||||
credentials auth.Credentials, t *testing.T) {
|
||||
|
||||
credentials auth.Credentials, t *testing.T,
|
||||
) {
|
||||
// template for constructing HTTP request body for PUT bucket policy.
|
||||
bucketPolicyTemplate := `{
|
||||
"Version": "2012-10-17",
|
||||
@@ -696,7 +749,6 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
|
||||
// Bucket policy related functions doesn't support anonymous requests, setting policies shouldn't make a difference.
|
||||
// create unsigned HTTP request for PutBucketPolicyHandler.
|
||||
anonReq, err := newTestRequest(http.MethodDelete, getPutPolicyURL("", bucketName), 0, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
|
||||
instanceType, bucketName, err)
|
||||
@@ -715,7 +767,6 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
|
||||
|
||||
nilReq, err := newTestSignedRequestV4(http.MethodDelete, getDeletePolicyURL("", nilBucket),
|
||||
0, nil, "", "", nil)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
|
||||
}
|
||||
|
||||
@@ -77,10 +77,10 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
|
||||
}
|
||||
}
|
||||
|
||||
vid := r.URL.Query().Get("versionId")
|
||||
vid := r.Form.Get(xhttp.VersionID)
|
||||
if vid == "" {
|
||||
if u, err := url.Parse(r.Header.Get(xhttp.AmzCopySource)); err == nil {
|
||||
vid = u.Query().Get("versionId")
|
||||
vid = u.Query().Get(xhttp.VersionID)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -143,8 +143,8 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
|
||||
}
|
||||
}
|
||||
|
||||
var cloneURLValues = url.Values{}
|
||||
for k, v := range r.URL.Query() {
|
||||
cloneURLValues := make(url.Values, len(r.Form))
|
||||
for k, v := range r.Form {
|
||||
cloneURLValues[k] = v
|
||||
}
|
||||
|
||||
@@ -172,11 +172,12 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
|
||||
vStr, ok := v.(string)
|
||||
if ok {
|
||||
// Special case for AD/LDAP STS users
|
||||
if k == ldapUser {
|
||||
switch k {
|
||||
case ldapUser:
|
||||
args["user"] = []string{vStr}
|
||||
} else if k == ldapUserN {
|
||||
case ldapUserN:
|
||||
args["username"] = []string{vStr}
|
||||
} else {
|
||||
default:
|
||||
args[k] = []string{vStr}
|
||||
}
|
||||
}
|
||||
@@ -199,7 +200,7 @@ func PolicyToBucketAccessPolicy(bucketPolicy *policy.Policy) (*miniogopolicy.Buc
|
||||
}
|
||||
|
||||
var policyInfo miniogopolicy.BucketAccessPolicy
|
||||
var json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
if err = json.Unmarshal(data, &policyInfo); err != nil {
|
||||
// This should not happen because data is valid to JSON data.
|
||||
return nil, err
|
||||
@@ -217,7 +218,7 @@ func BucketAccessPolicyToPolicy(policyInfo *miniogopolicy.BucketAccessPolicy) (*
|
||||
}
|
||||
|
||||
var bucketPolicy policy.Policy
|
||||
var json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
if err = json.Unmarshal(data, &bucketPolicy); err != nil {
|
||||
// This should not happen because data is valid to JSON data.
|
||||
return nil, err
|
||||
|
||||
@@ -20,11 +20,11 @@ package cmd
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/minio/madmin-go"
|
||||
"github.com/minio/minio/internal/event"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
)
|
||||
|
||||
@@ -34,7 +34,7 @@ type BucketQuotaSys struct {
|
||||
}
|
||||
|
||||
// Get - Get quota configuration.
|
||||
func (sys *BucketQuotaSys) Get(bucketName string) (*madmin.BucketQuota, error) {
|
||||
func (sys *BucketQuotaSys) Get(ctx context.Context, bucketName string) (*madmin.BucketQuota, error) {
|
||||
if globalIsGateway {
|
||||
objAPI := newObjectLayerFn()
|
||||
if objAPI == nil {
|
||||
@@ -43,7 +43,7 @@ func (sys *BucketQuotaSys) Get(bucketName string) (*madmin.BucketQuota, error) {
|
||||
return &madmin.BucketQuota{}, nil
|
||||
}
|
||||
|
||||
return globalBucketMetadataSys.GetQuotaConfig(bucketName)
|
||||
return globalBucketMetadataSys.GetQuotaConfig(ctx, bucketName)
|
||||
}
|
||||
|
||||
// NewBucketQuotaSys returns initialized BucketQuotaSys
|
||||
@@ -51,6 +51,35 @@ func NewBucketQuotaSys() *BucketQuotaSys {
|
||||
return &BucketQuotaSys{}
|
||||
}
|
||||
|
||||
// Init initialize bucket quota.
|
||||
func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
|
||||
sys.bucketStorageCache.Once.Do(func() {
|
||||
sys.bucketStorageCache.TTL = 1 * time.Second
|
||||
sys.bucketStorageCache.Update = func() (interface{}, error) {
|
||||
ctx, done := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer done()
|
||||
|
||||
return loadDataUsageFromBackend(ctx, objAPI)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// GetBucketUsageInfo return bucket usage info for a given bucket
|
||||
func (sys *BucketQuotaSys) GetBucketUsageInfo(bucket string) (BucketUsageInfo, error) {
|
||||
v, err := sys.bucketStorageCache.Get()
|
||||
if err != nil {
|
||||
return BucketUsageInfo{}, err
|
||||
}
|
||||
|
||||
dui, ok := v.(DataUsageInfo)
|
||||
if !ok {
|
||||
return BucketUsageInfo{}, fmt.Errorf("internal error: Unexpected DUI data type: %T", v)
|
||||
}
|
||||
|
||||
bui := dui.BucketsUsage[bucket]
|
||||
return bui, nil
|
||||
}
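Init wires the quota subsystem's bucketStorageCache to a once-initialized loader with a one-second TTL, so repeated quota checks reuse a recent data-usage snapshot instead of hitting the backend on every request. A rough sketch of that caching pattern (the ttlCache type here is hypothetical; MinIO's actual cache type is not shown in this diff):

package main

import (
	"fmt"
	"sync"
	"time"
)

// ttlCache caches the result of update() for ttl; a stand-in for the
// bucketStorageCache used above.
type ttlCache struct {
	mu      sync.Mutex
	ttl     time.Duration
	update  func() (interface{}, error)
	val     interface{}
	fetched time.Time
}

func (c *ttlCache) Get() (interface{}, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.val != nil && time.Since(c.fetched) < c.ttl {
		return c.val, nil // still fresh, serve the cached snapshot
	}
	v, err := c.update()
	if err != nil {
		return nil, err
	}
	c.val, c.fetched = v, time.Now()
	return v, nil
}

func main() {
	calls := 0
	c := &ttlCache{ttl: time.Second, update: func() (interface{}, error) {
		calls++
		return calls, nil
	}}
	a, _ := c.Get()
	b, _ := c.Get() // within TTL: no second backend load
	fmt.Println(a, b, calls) // 1 1 1
}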

// parseBucketQuota parses BucketQuota from json
func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota, err error) {
quotaCfg = &madmin.BucketQuota{}
@@ -58,47 +87,32 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota,
return quotaCfg, err
}
if !quotaCfg.IsValid() {
if quotaCfg.Type == "fifo" {
logger.LogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc admin bucket quota alias/bucket --clear' and use 'mc ilm add' for expiration of objects"))
return quotaCfg, nil
}
return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg)
}
return
}

func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64) error {
objAPI := newObjectLayerFn()
if objAPI == nil {
return errServerNotInitialized
func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string, size int64) error {
if size < 0 {
return nil
}

sys.bucketStorageCache.Once.Do(func() {
sys.bucketStorageCache.TTL = 1 * time.Second
sys.bucketStorageCache.Update = func() (interface{}, error) {
ctx, done := context.WithTimeout(context.Background(), 5*time.Second)
defer done()
return loadDataUsageFromBackend(ctx, objAPI)
}
})

q, err := sys.Get(bucket)
q, err := sys.Get(ctx, bucket)
if err != nil {
return err
}

if q != nil && q.Type == madmin.HardQuota && q.Quota > 0 {
v, err := sys.bucketStorageCache.Get()
bui, err := sys.GetBucketUsageInfo(bucket)
if err != nil {
return err
}

dui := v.(madmin.DataUsageInfo)

bui, ok := dui.BucketsUsage[bucket]
if !ok {
// bucket not found, cannot enforce quota
// call will fail anyway later.
return nil
}

if (bui.Size + uint64(size)) >= q.Quota {
if bui.Size > 0 && ((bui.Size + uint64(size)) >= q.Quota) {
return BucketQuotaExceeded{Bucket: bucket}
}
}
@@ -106,120 +120,9 @@ func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64)
return nil
}
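The rewritten check, now enforceQuotaHard, only rejects a write when a hard quota with a positive limit is configured and the cached usage (when non-zero) plus the incoming size would reach it. A standalone sketch of that decision, with hypothetical names:

package main

import "fmt"

// exceedsHardQuota mirrors the guard above: negative sizes are ignored,
// zero usage is trusted (the scanner may not have produced data yet), and
// the write is rejected once usage+size would reach the hard limit.
func exceedsHardQuota(usage, quota uint64, size int64) bool {
	if size < 0 || quota == 0 {
		return false
	}
	return usage > 0 && usage+uint64(size) >= quota
}

func main() {
	fmt.Println(exceedsHardQuota(90, 100, 20)) // true: 90+20 >= 100
	fmt.Println(exceedsHardQuota(0, 100, 500)) // false: no usage data yet
}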

func enforceBucketQuota(ctx context.Context, bucket string, size int64) error {
if size < 0 {
func enforceBucketQuotaHard(ctx context.Context, bucket string, size int64) error {
if globalBucketQuotaSys == nil {
return nil
}

return globalBucketQuotaSys.check(ctx, bucket, size)
}

// enforceFIFOQuota deletes objects in FIFO order until sufficient objects
// have been deleted so as to bring bucket usage within quota.
func enforceFIFOQuotaBucket(ctx context.Context, objectAPI ObjectLayer, bucket string, bui madmin.BucketUsageInfo) {
// Check if the current bucket has quota restrictions, if not skip it
cfg, err := globalBucketQuotaSys.Get(bucket)
if err != nil {
return
}

if cfg.Type != madmin.FIFOQuota {
return
}

var toFree uint64
if bui.Size > cfg.Quota && cfg.Quota > 0 {
toFree = bui.Size - cfg.Quota
}

if toFree <= 0 {
return
}

// Allocate new results channel to receive ObjectInfo.
objInfoCh := make(chan ObjectInfo)

versioned := globalBucketVersioningSys.Enabled(bucket)

// Walk through all objects
if err := objectAPI.Walk(ctx, bucket, "", objInfoCh, ObjectOptions{WalkVersions: versioned}); err != nil {
logger.LogIf(ctx, err)
return
}

// reuse the fileScorer used by disk cache to score entries by
// ModTime to find the oldest objects in bucket to delete. In
// the context of bucket quota enforcement - the number of hits is
// irrelevant.
scorer, err := newFileScorer(toFree, time.Now().Unix(), 1)
if err != nil {
logger.LogIf(ctx, err)
return
}

rcfg, _ := globalBucketObjectLockSys.Get(bucket)
for obj := range objInfoCh {
if obj.DeleteMarker {
// Delete markers are automatically added for FIFO purge.
scorer.addFileWithObjInfo(obj, 1)
continue
}
// skip objects currently under retention
if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
continue
}
scorer.addFileWithObjInfo(obj, 1)
}

// If we saw less than quota we are good.
if scorer.seenBytes <= cfg.Quota {
return
}
// Calculate how much we want to delete now.
toFreeNow := scorer.seenBytes - cfg.Quota
// We were less over quota than we thought. Adjust so we delete less.
// If we are more over, leave it for the next run to pick up.
if toFreeNow < toFree {
if !scorer.adjustSaveBytes(int64(toFreeNow) - int64(toFree)) {
// We got below or at quota.
return
}
}

var objects []ObjectToDelete
numKeys := len(scorer.fileObjInfos())
for i, obj := range scorer.fileObjInfos() {
objects = append(objects, ObjectToDelete{
ObjectName: obj.Name,
VersionID: obj.VersionID,
})
if len(objects) < maxDeleteList && (i < numKeys-1) {
// skip deletion until maxDeleteList or end of slice
continue
}

if len(objects) == 0 {
break
}

// Deletes a list of objects.
_, deleteErrs := objectAPI.DeleteObjects(ctx, bucket, objects, ObjectOptions{
Versioned: versioned,
})
for i := range deleteErrs {
if deleteErrs[i] != nil {
logger.LogIf(ctx, deleteErrs[i])
continue
}

// Notify object deleted event.
sendEvent(eventArgs{
EventName: event.ObjectRemovedDelete,
BucketName: bucket,
Object: obj,
Host: "Internal: [FIFO-QUOTA-EXPIRY]",
})
}
objects = nil
}
return globalBucketQuotaSys.enforceQuotaHard(ctx, bucket, size)
}

@@ -20,16 +20,18 @@ package cmd
import (
"context"
"sync"
"sync/atomic"
"time"

"github.com/minio/minio/internal/bucket/replication"
)

func (b *BucketReplicationStats) hasReplicationUsage() bool {
return b.FailedSize > 0 ||
b.ReplicatedSize > 0 ||
b.ReplicaSize > 0 ||
b.FailedCount > 0
for _, s := range b.Stats {
if s.hasReplicationUsage() {
return true
}
}
return false
}

// ReplicationStats holds the global in-memory replication stats
@@ -37,6 +39,7 @@ type ReplicationStats struct {
Cache map[string]*BucketReplicationStats
UsageCache map[string]*BucketReplicationStats
sync.RWMutex
ulock sync.RWMutex
}

// Delete deletes in-memory replication statistics for a bucket.
@@ -48,50 +51,74 @@ func (r *ReplicationStats) Delete(bucket string) {
r.Lock()
defer r.Unlock()
delete(r.Cache, bucket)
delete(r.UsageCache, bucket)

r.ulock.Lock()
defer r.ulock.Unlock()
delete(r.UsageCache, bucket)
}

// Update updates in-memory replication statistics with new values.
func (r *ReplicationStats) Update(bucket string, n int64, status, prevStatus replication.StatusType, opType replication.Type) {
// UpdateReplicaStat updates in-memory replica statistics with new values.
func (r *ReplicationStats) UpdateReplicaStat(bucket string, n int64) {
if r == nil {
return
}

r.RLock()
b, ok := r.Cache[bucket]
r.Lock()
defer r.Unlock()
bs, ok := r.Cache[bucket]
if !ok {
b = &BucketReplicationStats{}
bs = &BucketReplicationStats{Stats: make(map[string]*BucketReplicationStat)}
}
bs.ReplicaSize += n
r.Cache[bucket] = bs
}

// Update updates in-memory replication statistics with new values.
func (r *ReplicationStats) Update(bucket string, arn string, n int64, duration time.Duration, status, prevStatus replication.StatusType, opType replication.Type) {
if r == nil {
return
}
r.Lock()
defer r.Unlock()

bs, ok := r.Cache[bucket]
if !ok {
bs = &BucketReplicationStats{Stats: make(map[string]*BucketReplicationStat)}
r.Cache[bucket] = bs
}
b, ok := bs.Stats[arn]
if !ok {
b = &BucketReplicationStat{}
bs.Stats[arn] = b
}
r.RUnlock()
switch status {
case replication.Completed:
switch prevStatus { // adjust counters based on previous state
case replication.Failed:
atomic.AddUint64(&b.FailedCount, ^uint64(0))
b.FailedCount--
}
if opType == replication.ObjectReplicationType {
atomic.AddUint64(&b.ReplicatedSize, uint64(n))
b.ReplicatedSize += n
switch prevStatus {
case replication.Failed:
atomic.AddUint64(&b.FailedSize, ^uint64(n-1))
b.FailedSize -= n
}
if duration > 0 {
b.Latency.update(n, duration)
}
}
case replication.Failed:
if opType == replication.ObjectReplicationType {
if prevStatus == replication.Pending {
atomic.AddUint64(&b.FailedSize, uint64(n))
atomic.AddUint64(&b.FailedCount, 1)
b.FailedSize += n
b.FailedCount++
}
}
case replication.Replica:
if opType == replication.ObjectReplicationType {
atomic.AddUint64(&b.ReplicaSize, uint64(n))
b.ReplicaSize += n
}
}
r.Lock()
r.Cache[bucket] = b
r.Unlock()
}
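Update now keys counters per target ARN under the bucket's coarse lock instead of per-bucket atomics; on a Failed to Completed transition it both undoes the failure counters and credits ReplicatedSize. A small sketch of that transition bookkeeping (simplified struct, not MinIO's BucketReplicationStat):

package main

import "fmt"

type stat struct {
	ReplicatedSize, FailedSize, FailedCount int64
}

// applyCompleted records a successful replication of n bytes, undoing the
// failure counters if the previous attempt for this version had failed.
func applyCompleted(s *stat, n int64, prevFailed bool) {
	if prevFailed {
		s.FailedCount--
		s.FailedSize -= n
	}
	s.ReplicatedSize += n
}

func main() {
	s := &stat{FailedSize: 1024, FailedCount: 1}
	applyCompleted(s, 1024, true) // retry of a previously failed 1 KiB object
	fmt.Printf("%+v\n", *s)       // {ReplicatedSize:1024 FailedSize:0 FailedCount:0}
}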

// GetInitialUsage gets replication metrics available at the time of cluster initialization
@@ -99,26 +126,19 @@ func (r *ReplicationStats) GetInitialUsage(bucket string) BucketReplicationStats
if r == nil {
return BucketReplicationStats{}
}

r.RLock()
defer r.RUnlock()

r.ulock.RLock()
defer r.ulock.RUnlock()
st, ok := r.UsageCache[bucket]
if !ok {
return BucketReplicationStats{}
}
return BucketReplicationStats{
FailedSize: atomic.LoadUint64(&st.FailedSize),
ReplicatedSize: atomic.LoadUint64(&st.ReplicatedSize),
ReplicaSize: atomic.LoadUint64(&st.ReplicaSize),
FailedCount: atomic.LoadUint64(&st.FailedCount),
}
return st.Clone()
}

// Get replication metrics for a bucket from this node since this node came up.
func (r *ReplicationStats) Get(bucket string) BucketReplicationStats {
if r == nil {
return BucketReplicationStats{}
return BucketReplicationStats{Stats: make(map[string]*BucketReplicationStat)}
}

r.RLock()
@@ -128,43 +148,61 @@ func (r *ReplicationStats) Get(bucket string) BucketReplicationStats {
if !ok {
return BucketReplicationStats{}
}

return BucketReplicationStats{
FailedSize: atomic.LoadUint64(&st.FailedSize),
ReplicatedSize: atomic.LoadUint64(&st.ReplicatedSize),
ReplicaSize: atomic.LoadUint64(&st.ReplicaSize),
FailedCount: atomic.LoadUint64(&st.FailedCount),
}
return st.Clone()
}

// NewReplicationStats initializes in-memory replication statistics
func NewReplicationStats(ctx context.Context, objectAPI ObjectLayer) *ReplicationStats {
st := &ReplicationStats{
return &ReplicationStats{
Cache: make(map[string]*BucketReplicationStats),
UsageCache: make(map[string]*BucketReplicationStats),
}

dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil {
return st
}

// data usage has not captured any data yet.
if dataUsageInfo.LastUpdate.IsZero() {
return st
}

for bucket, usage := range dataUsageInfo.BucketsUsage {
b := &BucketReplicationStats{
FailedSize: usage.ReplicationFailedSize,
ReplicatedSize: usage.ReplicatedSize,
ReplicaSize: usage.ReplicaSize,
FailedCount: usage.ReplicationFailedCount,
}
if b.hasReplicationUsage() {
st.UsageCache[bucket] = b
}
}

return st
}

// load replication metrics at cluster start from initial data usage
func (r *ReplicationStats) loadInitialReplicationMetrics(ctx context.Context) {
rTimer := time.NewTimer(time.Minute * 1)
defer rTimer.Stop()
var (
dui DataUsageInfo
err error
)
outer:
for {
select {
case <-ctx.Done():
return
case <-rTimer.C:
dui, err = loadDataUsageFromBackend(GlobalContext, newObjectLayerFn())
if err != nil {
continue
}
// If LastUpdate is set, data usage is available.
if !dui.LastUpdate.IsZero() {
break outer
}
}
}

m := make(map[string]*BucketReplicationStats)
for bucket, usage := range dui.BucketsUsage {
b := &BucketReplicationStats{
Stats: make(map[string]*BucketReplicationStat, len(usage.ReplicationInfo)),
}
for arn, uinfo := range usage.ReplicationInfo {
b.Stats[arn] = &BucketReplicationStat{
FailedSize: int64(uinfo.ReplicationFailedSize),
ReplicatedSize: int64(uinfo.ReplicatedSize),
ReplicaSize: int64(uinfo.ReplicaSize),
FailedCount: int64(uinfo.ReplicationFailedCount),
}
}
b.ReplicaSize += int64(usage.ReplicaSize)
if b.hasReplicationUsage() {
m[bucket] = b
}
}
r.ulock.Lock()
defer r.ulock.Unlock()
r.UsageCache = m
}

cmd/bucket-replication-utils.go (new file, 726 lines)
@@ -0,0 +1,726 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
"bytes"
"fmt"
"net/http"
"regexp"
"strconv"
"strings"
"sync"
"time"

"github.com/minio/minio/internal/bucket/replication"
xhttp "github.com/minio/minio/internal/http"
)

//go:generate msgp -file=$GOFILE

// replicatedTargetInfo struct represents replication info on a target
type replicatedTargetInfo struct {
Arn string
Size int64
Duration time.Duration
ReplicationAction replicationAction // full or metadata only
OpType replication.Type // whether incoming replication, existing object, healing etc.
ReplicationStatus replication.StatusType
PrevReplicationStatus replication.StatusType
VersionPurgeStatus VersionPurgeStatusType
ResyncTimestamp string
ReplicationResynced bool // true only if resync attempted for this target
}

// Empty returns true for a target if arn is empty
func (rt replicatedTargetInfo) Empty() bool {
return rt.Arn == ""
}

type replicatedInfos struct {
ReplicationTimeStamp time.Time
Targets []replicatedTargetInfo
}

func (ri replicatedInfos) CompletedSize() (sz int64) {
for _, t := range ri.Targets {
if t.Empty() {
continue
}
if t.ReplicationStatus == replication.Completed && t.PrevReplicationStatus != replication.Completed {
sz += t.Size
}
}
return sz
}

// ReplicationResynced returns true if resync was attempted on any of the targets for the object version
// queued
func (ri replicatedInfos) ReplicationResynced() bool {
for _, t := range ri.Targets {
if t.Empty() || !t.ReplicationResynced {
continue
}
return true
}
return false
}

func (ri replicatedInfos) ReplicationStatusInternal() string {
b := new(bytes.Buffer)
for _, t := range ri.Targets {
if t.Empty() {
continue
}
fmt.Fprintf(b, "%s=%s;", t.Arn, t.ReplicationStatus.String())
}
return b.String()
}

func (ri replicatedInfos) ReplicationStatus() replication.StatusType {
if len(ri.Targets) == 0 {
return replication.StatusType("")
}
completed := 0
for _, v := range ri.Targets {
switch v.ReplicationStatus {
case replication.Failed:
return replication.Failed
case replication.Completed:
completed++
}
}
if completed == len(ri.Targets) {
return replication.Completed
}
return replication.Pending
}

func (ri replicatedInfos) VersionPurgeStatus() VersionPurgeStatusType {
if len(ri.Targets) == 0 {
return VersionPurgeStatusType("")
}
completed := 0
for _, v := range ri.Targets {
switch v.VersionPurgeStatus {
case Failed:
return Failed
case Complete:
completed++
}
}
if completed == len(ri.Targets) {
return Complete
}
return Pending
}

func (ri replicatedInfos) VersionPurgeStatusInternal() string {
b := new(bytes.Buffer)
for _, t := range ri.Targets {
if t.Empty() {
continue
}
if t.VersionPurgeStatus.Empty() {
continue
}
fmt.Fprintf(b, "%s=%s;", t.Arn, t.VersionPurgeStatus)
}
return b.String()
}

func (ri replicatedInfos) Action() replicationAction {
for _, t := range ri.Targets {
if t.Empty() {
continue
}
// rely on replication action from target that actually performed replication now.
if t.PrevReplicationStatus != replication.Completed {
return t.ReplicationAction
}
}
return replicateNone
}

var replStatusRegex = regexp.MustCompile(`([^=].*?)=([^,].*?);`)

// TargetReplicationStatus - returns replication status of a target
func (o *ObjectInfo) TargetReplicationStatus(arn string) (status replication.StatusType) {
repStatMatches := replStatusRegex.FindAllStringSubmatch(o.ReplicationStatusInternal, -1)
for _, repStatMatch := range repStatMatches {
if len(repStatMatch) != 3 {
return
}
if repStatMatch[1] == arn {
return replication.StatusType(repStatMatch[2])
}
}
return
}

type replicateTargetDecision struct {
Replicate bool // Replicate to this target
Synchronous bool // Synchronous replication configured.
Arn string // ARN of replication target
ID string
}

func (t *replicateTargetDecision) String() string {
return fmt.Sprintf("%t;%t;%s;%s", t.Replicate, t.Synchronous, t.Arn, t.ID)
}

func newReplicateTargetDecision(arn string, replicate bool, sync bool) replicateTargetDecision {
d := replicateTargetDecision{
Replicate: replicate,
Synchronous: sync,
Arn: arn,
}
return d
}

// ReplicateDecision represents replication decision for each target
type ReplicateDecision struct {
targetsMap map[string]replicateTargetDecision
}

// ReplicateAny returns true if at least one target qualifies for replication
func (d *ReplicateDecision) ReplicateAny() bool {
for _, t := range d.targetsMap {
if t.Replicate {
return true
}
}
return false
}

// Synchronous returns true if at least one target qualifies for synchronous replication
func (d *ReplicateDecision) Synchronous() bool {
for _, t := range d.targetsMap {
if t.Synchronous {
return true
}
}
return false
}

func (d *ReplicateDecision) String() string {
b := new(bytes.Buffer)
for key, value := range d.targetsMap {
fmt.Fprintf(b, "%s=%s,", key, value.String())
}
return strings.TrimSuffix(b.String(), ",")
}

// Set updates ReplicateDecision with target's replication decision
func (d *ReplicateDecision) Set(t replicateTargetDecision) {
if d.targetsMap == nil {
d.targetsMap = make(map[string]replicateTargetDecision)
}
d.targetsMap[t.Arn] = t
}

// PendingStatus returns a stringified representation of internal replication status with all targets marked as `PENDING`
func (d *ReplicateDecision) PendingStatus() string {
b := new(bytes.Buffer)
for _, k := range d.targetsMap {
if k.Replicate {
fmt.Fprintf(b, "%s=%s;", k.Arn, replication.Pending.String())
}
}
return b.String()
}

// ResyncDecision is a struct representing a map with target's individual resync decisions
type ResyncDecision struct {
targets map[string]ResyncTargetDecision
}

// Empty returns true if no targets with resync decision present
func (r *ResyncDecision) Empty() bool {
return r.targets == nil
}

func (r *ResyncDecision) mustResync() bool {
for _, v := range r.targets {
if v.Replicate {
return true
}
}
return false
}

func (r *ResyncDecision) mustResyncTarget(tgtArn string) bool {
if r.targets == nil {
return false
}
v, ok := r.targets[tgtArn]
if ok && v.Replicate {
return true
}
return false
}

// ResyncTargetDecision is struct that represents resync decision for this target
type ResyncTargetDecision struct {
Replicate bool
ResetID string
ResetBeforeDate time.Time
}

var errInvalidReplicateDecisionFormat = fmt.Errorf("ReplicateDecision has invalid format")

// parse k-v pairs of target ARN to stringified ReplicateTargetDecision delimited by ',' into a
// ReplicateDecision struct
func parseReplicateDecision(s string) (r ReplicateDecision, err error) {
r = ReplicateDecision{
targetsMap: make(map[string]replicateTargetDecision),
}
if len(s) == 0 {
return
}
pairs := strings.Split(s, ",")
for _, p := range pairs {
slc := strings.Split(p, "=")
if len(slc) != 2 {
return r, errInvalidReplicateDecisionFormat
}
tgtStr := strings.TrimPrefix(slc[1], "\"")
tgtStr = strings.TrimSuffix(tgtStr, "\"")
tgt := strings.Split(tgtStr, ";")
if len(tgt) != 4 {
return r, errInvalidReplicateDecisionFormat
}
var replicate, sync bool
var err error
replicate, err = strconv.ParseBool(tgt[0])
if err != nil {
return r, err
}
sync, err = strconv.ParseBool(tgt[1])
if err != nil {
return r, err
}
r.targetsMap[slc[0]] = replicateTargetDecision{Replicate: replicate, Synchronous: sync, Arn: tgt[2], ID: tgt[3]}
}
return
}
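parseReplicateDecision inverts the String() encoding shown earlier: comma-separated `arn=replicate;sync;arn;id` pairs, with the value optionally quoted. A quick, self-contained re-implementation of the split logic for a single pair (the target ARN below is made up for the example):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// One encoded target decision, quoted as parseReplicateDecision allows.
	s := `arn:minio:replication::id1:dest="true;false;arn:minio:replication::id1:dest;id1"`
	kv := strings.SplitN(s, "=", 2)
	tgt := strings.Split(strings.Trim(kv[1], `"`), ";")
	replicate, _ := strconv.ParseBool(tgt[0]) // should this target replicate?
	sync, _ := strconv.ParseBool(tgt[1])      // synchronous replication?
	fmt.Println(kv[0], replicate, sync, tgt[2], tgt[3])
	// arn:minio:replication::id1:dest true false arn:minio:replication::id1:dest id1
}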

// ReplicationState represents internal replication state
type ReplicationState struct {
ReplicaTimeStamp time.Time // timestamp when last replica update was received
ReplicaStatus replication.StatusType // replica status
DeleteMarker bool // represents DeleteMarker replication state
ReplicationTimeStamp time.Time // timestamp when last replication activity happened
ReplicationStatusInternal string // stringified representation of all replication activity
// VersionPurgeStatusInternal is internally in the format "arn1=PENDING;arn2=COMPLETED;"
VersionPurgeStatusInternal string // stringified representation of all version purge statuses
ReplicateDecisionStr string // stringified representation of replication decision for each target
Targets map[string]replication.StatusType // map of ARN->replication status for ongoing replication activity
PurgeTargets map[string]VersionPurgeStatusType // map of ARN->VersionPurgeStatus for all the targets
ResetStatusesMap map[string]string // map of ARN-> stringified reset id and timestamp for all the targets
}

// Equal returns true if replication state is identical for version purge statuses and (replica)tion statuses.
func (rs *ReplicationState) Equal(o ReplicationState) bool {
return rs.ReplicaStatus == o.ReplicaStatus &&
rs.ReplicaTimeStamp.Equal(o.ReplicaTimeStamp) &&
rs.ReplicationTimeStamp.Equal(o.ReplicationTimeStamp) &&
rs.ReplicationStatusInternal == o.ReplicationStatusInternal &&
rs.VersionPurgeStatusInternal == o.VersionPurgeStatusInternal
}

// CompositeReplicationStatus returns overall replication status for the object version being replicated.
func (rs *ReplicationState) CompositeReplicationStatus() (st replication.StatusType) {
switch {
case rs.ReplicationStatusInternal != "":
switch replication.StatusType(rs.ReplicationStatusInternal) {
case replication.Pending, replication.Completed, replication.Failed, replication.Replica: // for backward compatibility
return replication.StatusType(rs.ReplicationStatusInternal)
default:
replStatus := getCompositeReplicationStatus(rs.Targets)
// return REPLICA status if replica received timestamp is later than replication timestamp
// provided object replication completed for all targets.
if !rs.ReplicaTimeStamp.Equal(timeSentinel) && replStatus == replication.Completed && rs.ReplicaTimeStamp.After(rs.ReplicationTimeStamp) {
return rs.ReplicaStatus
}
return replStatus
}
case !rs.ReplicaStatus.Empty():
return rs.ReplicaStatus
default:
return
}
}

// CompositeVersionPurgeStatus returns overall replication purge status for the permanent delete being replicated.
func (rs *ReplicationState) CompositeVersionPurgeStatus() VersionPurgeStatusType {
switch VersionPurgeStatusType(rs.VersionPurgeStatusInternal) {
case Pending, Complete, Failed: // for backward compatibility
return VersionPurgeStatusType(rs.VersionPurgeStatusInternal)
default:
return getCompositeVersionPurgeStatus(rs.PurgeTargets)
}
}

// targetState returns a replicatedTargetInfo struct initialized with the previous state of replication
func (rs *ReplicationState) targetState(arn string) (r replicatedTargetInfo) {
return replicatedTargetInfo{
Arn: arn,
PrevReplicationStatus: rs.Targets[arn],
VersionPurgeStatus: rs.PurgeTargets[arn],
ResyncTimestamp: rs.ResetStatusesMap[arn],
}
}

// getReplicationState returns replication state using target replicated info for the targets
func getReplicationState(rinfos replicatedInfos, prevState ReplicationState, vID string) ReplicationState {
rs := ReplicationState{
ReplicateDecisionStr: prevState.ReplicateDecisionStr,
ResetStatusesMap: prevState.ResetStatusesMap,
ReplicaTimeStamp: prevState.ReplicaTimeStamp,
ReplicaStatus: prevState.ReplicaStatus,
}
var replStatuses, vpurgeStatuses string
replStatuses = rinfos.ReplicationStatusInternal()
rs.Targets = replicationStatusesMap(replStatuses)
rs.ReplicationStatusInternal = replStatuses
rs.ReplicationTimeStamp = rinfos.ReplicationTimeStamp

vpurgeStatuses = rinfos.VersionPurgeStatusInternal()
rs.VersionPurgeStatusInternal = vpurgeStatuses
rs.PurgeTargets = versionPurgeStatusesMap(vpurgeStatuses)

for _, rinfo := range rinfos.Targets {
if rinfo.ResyncTimestamp != "" {
rs.ResetStatusesMap[targetResetHeader(rinfo.Arn)] = rinfo.ResyncTimestamp
}
}
return rs
}

// constructs a replication status map from string representation
func replicationStatusesMap(s string) map[string]replication.StatusType {
targets := make(map[string]replication.StatusType)
repStatMatches := replStatusRegex.FindAllStringSubmatch(s, -1)
for _, repStatMatch := range repStatMatches {
if len(repStatMatch) != 3 {
continue
}
status := replication.StatusType(repStatMatch[2])
targets[repStatMatch[1]] = status
}
return targets
}
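replicationStatusesMap relies on replStatusRegex (defined earlier in this file) to pull `arn=STATUS;` pairs out of the internal string form. For instance, with two targets the decoding looks like this (ARNs invented for illustration):

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as replStatusRegex above.
var re = regexp.MustCompile(`([^=].*?)=([^,].*?);`)

func main() {
	s := "arn:a=COMPLETED;arn:b=PENDING;"
	for _, m := range re.FindAllStringSubmatch(s, -1) {
		fmt.Printf("%s -> %s\n", m[1], m[2])
	}
	// arn:a -> COMPLETED
	// arn:b -> PENDING
}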
|
||||
|
||||
// constructs a version purge status map from string representation
|
||||
func versionPurgeStatusesMap(s string) map[string]VersionPurgeStatusType {
|
||||
targets := make(map[string]VersionPurgeStatusType)
|
||||
purgeStatusMatches := replStatusRegex.FindAllStringSubmatch(s, -1)
|
||||
for _, purgeStatusMatch := range purgeStatusMatches {
|
||||
if len(purgeStatusMatch) != 3 {
|
||||
continue
|
||||
}
|
||||
targets[purgeStatusMatch[1]] = VersionPurgeStatusType(purgeStatusMatch[2])
|
||||
}
|
||||
return targets
|
||||
}
|
||||
|
||||
// return the overall replication status for all the targets
|
||||
func getCompositeReplicationStatus(m map[string]replication.StatusType) replication.StatusType {
|
||||
if len(m) == 0 {
|
||||
return replication.StatusType("")
|
||||
}
|
||||
completed := 0
|
||||
for _, v := range m {
|
||||
switch v {
|
||||
case replication.Failed:
|
||||
return replication.Failed
|
||||
case replication.Completed:
|
||||
completed++
|
||||
}
|
||||
}
|
||||
if completed == len(m) {
|
||||
return replication.Completed
|
||||
}
|
||||
return replication.Pending
|
||||
}
|
||||
|
||||
// return the overall version purge status for all the targets
|
||||
func getCompositeVersionPurgeStatus(m map[string]VersionPurgeStatusType) VersionPurgeStatusType {
|
||||
if len(m) == 0 {
|
||||
return VersionPurgeStatusType("")
|
||||
}
|
||||
completed := 0
|
||||
for _, v := range m {
|
||||
switch v {
|
||||
case Failed:
|
||||
return Failed
|
||||
case Complete:
|
||||
completed++
|
||||
}
|
||||
}
|
||||
if completed == len(m) {
|
||||
return Complete
|
||||
}
|
||||
return Pending
|
||||
}
|
||||
|
||||
// getHealReplicateObjectInfo returns info needed by heal replication in ReplicateObjectInfo
|
||||
func getHealReplicateObjectInfo(objInfo ObjectInfo, rcfg replicationConfig) ReplicateObjectInfo {
|
||||
oi := objInfo.Clone()
|
||||
if rcfg.Config != nil && rcfg.Config.RoleArn != "" {
|
||||
// For backward compatibility of objects pending/failed replication.
|
||||
// Save replication related statuses in the new internal representation for
|
||||
// compatible behavior.
|
||||
if !oi.ReplicationStatus.Empty() {
|
||||
oi.ReplicationStatusInternal = fmt.Sprintf("%s=%s;", rcfg.Config.RoleArn, oi.ReplicationStatus)
|
||||
}
|
||||
if !oi.VersionPurgeStatus.Empty() {
|
||||
oi.VersionPurgeStatusInternal = fmt.Sprintf("%s=%s;", rcfg.Config.RoleArn, oi.VersionPurgeStatus)
|
||||
}
|
||||
for k, v := range oi.UserDefined {
|
||||
switch {
|
||||
case strings.EqualFold(k, ReservedMetadataPrefixLower+ReplicationReset):
|
||||
delete(oi.UserDefined, k)
|
||||
oi.UserDefined[targetResetHeader(rcfg.Config.RoleArn)] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
var dsc ReplicateDecision
|
||||
var tgtStatuses map[string]replication.StatusType
|
||||
if oi.DeleteMarker || !oi.VersionPurgeStatus.Empty() {
|
||||
dsc = checkReplicateDelete(GlobalContext, oi.Bucket, ObjectToDelete{
|
||||
ObjectV: ObjectV{
|
||||
ObjectName: oi.Name,
|
||||
VersionID: oi.VersionID,
|
||||
},
|
||||
}, oi, ObjectOptions{}, nil)
|
||||
} else {
|
||||
dsc = mustReplicate(GlobalContext, oi.Bucket, oi.Name, getMustReplicateOptions(ObjectInfo{
|
||||
UserDefined: oi.UserDefined,
|
||||
}, replication.HealReplicationType, ObjectOptions{}))
|
||||
}
|
||||
tgtStatuses = replicationStatusesMap(oi.ReplicationStatusInternal)
|
||||
|
||||
existingObjResync := rcfg.Resync(GlobalContext, oi, &dsc, tgtStatuses)
|
||||
|
||||
return ReplicateObjectInfo{
|
||||
ObjectInfo: oi,
|
||||
OpType: replication.HealReplicationType,
|
||||
Dsc: dsc,
|
||||
ExistingObjResync: existingObjResync,
|
||||
TargetStatuses: tgtStatuses,
|
||||
}
|
||||
}
|
||||
|
||||
// vID here represents the versionID client specified in request - need to distinguish between delete marker and delete marker deletion
|
||||
func (o *ObjectInfo) getReplicationState(dsc string, vID string, heal bool) ReplicationState {
|
||||
rs := ReplicationState{
|
||||
ReplicationStatusInternal: o.ReplicationStatusInternal,
|
||||
VersionPurgeStatusInternal: o.VersionPurgeStatusInternal,
|
||||
ReplicateDecisionStr: dsc,
|
||||
Targets: make(map[string]replication.StatusType),
|
||||
PurgeTargets: make(map[string]VersionPurgeStatusType),
|
||||
ResetStatusesMap: make(map[string]string),
|
||||
}
|
||||
rs.Targets = replicationStatusesMap(o.ReplicationStatusInternal)
|
||||
rs.PurgeTargets = versionPurgeStatusesMap(o.VersionPurgeStatusInternal)
|
||||
for k, v := range o.UserDefined {
|
||||
if strings.HasPrefix(k, ReservedMetadataPrefixLower+ReplicationReset) {
|
||||
arn := strings.TrimPrefix(k, fmt.Sprintf("%s-", ReservedMetadataPrefixLower+ReplicationReset))
|
||||
rs.ResetStatusesMap[arn] = v
|
||||
}
|
||||
}
|
||||
return rs
|
||||
}
|
||||
|
||||
// ReplicationState returns replication state using other internal replication metadata in ObjectToDelete
|
||||
func (o *ObjectToDelete) ReplicationState() ReplicationState {
|
||||
r := ReplicationState{
|
||||
ReplicationStatusInternal: o.DeleteMarkerReplicationStatus,
|
||||
VersionPurgeStatusInternal: o.VersionPurgeStatuses,
|
||||
ReplicateDecisionStr: o.ReplicateDecisionStr,
|
||||
}
|
||||
|
||||
r.Targets = replicationStatusesMap(o.DeleteMarkerReplicationStatus)
|
||||
r.PurgeTargets = versionPurgeStatusesMap(o.VersionPurgeStatuses)
|
||||
return r
|
||||
}
|
||||
|
||||
// VersionPurgeStatus returns a composite version purge status across targets
|
||||
func (d *DeletedObject) VersionPurgeStatus() VersionPurgeStatusType {
|
||||
return d.ReplicationState.CompositeVersionPurgeStatus()
|
||||
}
|
||||
|
||||
// DeleteMarkerReplicationStatus return composite replication status of delete marker across targets
|
||||
func (d *DeletedObject) DeleteMarkerReplicationStatus() replication.StatusType {
|
||||
return d.ReplicationState.CompositeReplicationStatus()
|
||||
}
|
||||
|
||||
// ResyncTargetsInfo holds a slice of targets with resync info per target
|
||||
type ResyncTargetsInfo struct {
|
||||
Targets []ResyncTarget `json:"target,omitempty"`
|
||||
}
|
||||
|
||||
// ResyncTarget is a struct representing the Target reset ID where target is identified by its Arn
|
||||
type ResyncTarget struct {
|
||||
Arn string `json:"arn"`
|
||||
ResetID string `json:"resetid"`
|
||||
StartTime time.Time `json:"startTime"`
|
||||
EndTime time.Time `json:"endTime"`
|
||||
// Status of resync operation
|
||||
ResyncStatus string `json:"resyncStatus,omitempty"`
|
||||
// Completed size in bytes
|
||||
ReplicatedSize int64 `json:"completedReplicationSize"`
|
||||
// Failed size in bytes
|
||||
FailedSize int64 `json:"failedReplicationSize"`
|
||||
// Total number of failed operations
|
||||
FailedCount int64 `json:"failedReplicationCount"`
|
||||
// Total number of failed operations
|
||||
ReplicatedCount int64 `json:"replicationCount"`
|
||||
// Last bucket/object replicated.
|
||||
Bucket string `json:"bucket,omitempty"`
|
||||
Object string `json:"object,omitempty"`
|
||||
}
|
||||
|
||||
// VersionPurgeStatusType represents status of a versioned delete or permanent delete w.r.t bucket replication
|
||||
type VersionPurgeStatusType string
|
||||
|
||||
const (
|
||||
// Pending - versioned delete replication is pending.
|
||||
Pending VersionPurgeStatusType = "PENDING"
|
||||
|
||||
// Complete - versioned delete replication is now complete, erase version on disk.
|
||||
Complete VersionPurgeStatusType = "COMPLETE"
|
||||
|
||||
// Failed - versioned delete replication failed.
|
||||
Failed VersionPurgeStatusType = "FAILED"
|
||||
)
|
||||
|
||||
// Empty returns true if purge status was not set.
|
||||
func (v VersionPurgeStatusType) Empty() bool {
|
||||
return string(v) == ""
|
||||
}
|
||||
|
||||
// Pending returns true if the version is pending purge.
|
||||
func (v VersionPurgeStatusType) Pending() bool {
|
||||
return v == Pending || v == Failed
|
||||
}
|
||||
|
||||
type replicationResyncState struct {
|
||||
// map of bucket to their resync status
|
||||
statusMap map[string]BucketReplicationResyncStatus
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
const (
|
||||
replicationDir = "replication"
|
||||
resyncFileName = "resync.bin"
|
||||
resyncMetaFormat = 1
|
||||
resyncMetaVersionV1 = 1
|
||||
resyncMetaVersion = resyncMetaVersionV1
|
||||
)
|
||||
|
||||
// ResyncStatusType status of resync operation
|
||||
type ResyncStatusType int
|
||||
|
||||
const (
|
||||
// NoResync - no resync in progress
|
||||
NoResync ResyncStatusType = iota
|
||||
// ResyncStarted - resync in progress
|
||||
ResyncStarted
|
||||
// ResyncCompleted - resync finished
|
||||
ResyncCompleted
|
||||
// ResyncFailed - resync failed
|
||||
ResyncFailed
|
||||
)
|
||||
|
||||
func (rt ResyncStatusType) String() string {
|
||||
switch rt {
|
||||
case ResyncStarted:
|
||||
return "Ongoing"
|
||||
case ResyncCompleted:
|
||||
return "Completed"
|
||||
case ResyncFailed:
|
||||
return "Failed"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// TargetReplicationResyncStatus status of resync of bucket for a specific target
|
||||
type TargetReplicationResyncStatus struct {
|
||||
StartTime time.Time `json:"startTime" msg:"st"`
|
||||
EndTime time.Time `json:"endTime" msg:"et"`
|
||||
// Resync ID assigned to this reset
|
||||
ResyncID string `json:"resyncID" msg:"id"`
|
||||
// ResyncBeforeDate - resync all objects created prior to this date
|
||||
ResyncBeforeDate time.Time `json:"resyncBeforeDate" msg:"rdt"`
|
||||
// Status of resync operation
|
||||
ResyncStatus ResyncStatusType `json:"resyncStatus" msg:"rst"`
|
||||
// Failed size in bytes
|
||||
FailedSize int64 `json:"failedReplicationSize" msg:"fs"`
|
||||
// Total number of failed operations
|
||||
FailedCount int64 `json:"failedReplicationCount" msg:"frc"`
|
||||
// Completed size in bytes
|
||||
ReplicatedSize int64 `json:"completedReplicationSize" msg:"rs"`
|
||||
// Total number of failed operations
|
||||
ReplicatedCount int64 `json:"replicationCount" msg:"rrc"`
|
||||
	// Last bucket/object replicated.
	Bucket string `json:"-" msg:"bkt"`
	Object string `json:"-" msg:"obj"`
}

// BucketReplicationResyncStatus captures current replication resync status
type BucketReplicationResyncStatus struct {
	Version int `json:"version" msg:"v"`
	// map of remote arn to their resync status for a bucket
	TargetsMap map[string]TargetReplicationResyncStatus `json:"resyncMap,omitempty" msg:"brs"`
	ID         int       `json:"id" msg:"id"`
	LastUpdate time.Time `json:"lastUpdate" msg:"lu"`
}

func newBucketResyncStatus(bucket string) BucketReplicationResyncStatus {
	return BucketReplicationResyncStatus{
		TargetsMap: make(map[string]TargetReplicationResyncStatus),
		Version:    resyncMetaVersion,
	}
}

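A short sketch of how the constructor above would be used when a resync is kicked off (the call site, resync ID, and ARN here are hypothetical):

// Hypothetical: start tracking a resync for a bucket and one remote target.
st := newBucketResyncStatus("mybucket")
st.TargetsMap["arn:minio:replication::id:bucket"] = TargetReplicationResyncStatus{
	StartTime:    UTCNow(),
	ResyncID:     "resync-1", // hypothetical ID
	ResyncStatus: ResyncStarted,
}
st.LastUpdate = UTCNow()
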
var contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)

// parse size from content-range header
func parseSizeFromContentRange(h http.Header) (sz int64, err error) {
	cr := h.Get(xhttp.ContentRange)
	if cr == "" {
		return sz, fmt.Errorf("Content-Range not set")
	}
	parts := contentRangeRegexp.FindStringSubmatch(cr)
	if len(parts) != 4 {
		return sz, fmt.Errorf("invalid Content-Range header %s", cr)
	}
	if parts[3] == "*" {
		return -1, nil
	}
	var usz uint64
	usz, err = strconv.ParseUint(parts[3], 10, 64)
	if err != nil {
		return sz, err
	}
	return int64(usz), nil
}
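
A quick illustration of the helper above; this is a sketch assuming it is exercised from the same package (the header values follow the RFC 7233 `bytes first-last/total` form the regexp matches):

// Hypothetical, for illustration only.
h := http.Header{}
h.Set("Content-Range", "bytes 0-511/1024")
sz, _ := parseSizeFromContentRange(h)
fmt.Println(sz) // 1024: the total size after the slash

h.Set("Content-Range", "bytes 0-511/*")
sz, _ = parseSizeFromContentRange(h)
fmt.Println(sz) // -1: total size unknown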
cmd/bucket-replication-utils_gen.go (new file, 2100 lines)
File diff suppressed because it is too large
cmd/bucket-replication-utils_gen_test.go (new file, 914 lines)
@@ -0,0 +1,914 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
	"bytes"
	"testing"

	"github.com/tinylib/msgp/msgp"
)

func TestMarshalUnmarshalBucketReplicationResyncStatus(t *testing.T) {
	v := BucketReplicationResyncStatus{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgBucketReplicationResyncStatus(b *testing.B) {
	v := BucketReplicationResyncStatus{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgBucketReplicationResyncStatus(b *testing.B) {
	v := BucketReplicationResyncStatus{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalBucketReplicationResyncStatus(b *testing.B) {
	v := BucketReplicationResyncStatus{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeBucketReplicationResyncStatus(t *testing.T) {
	v := BucketReplicationResyncStatus{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeBucketReplicationResyncStatus Msgsize() is inaccurate")
	}

	vn := BucketReplicationResyncStatus{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeBucketReplicationResyncStatus(b *testing.B) {
	v := BucketReplicationResyncStatus{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeBucketReplicationResyncStatus(b *testing.B) {
	v := BucketReplicationResyncStatus{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshalReplicateDecision(t *testing.T) {
	v := ReplicateDecision{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgReplicateDecision(b *testing.B) {
	v := ReplicateDecision{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgReplicateDecision(b *testing.B) {
	v := ReplicateDecision{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalReplicateDecision(b *testing.B) {
	v := ReplicateDecision{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeReplicateDecision(t *testing.T) {
	v := ReplicateDecision{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeReplicateDecision Msgsize() is inaccurate")
	}

	vn := ReplicateDecision{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeReplicateDecision(b *testing.B) {
	v := ReplicateDecision{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeReplicateDecision(b *testing.B) {
	v := ReplicateDecision{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshalReplicationState(t *testing.T) {
	v := ReplicationState{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgReplicationState(b *testing.B) {
	v := ReplicationState{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgReplicationState(b *testing.B) {
	v := ReplicationState{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalReplicationState(b *testing.B) {
	v := ReplicationState{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeReplicationState(t *testing.T) {
	v := ReplicationState{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeReplicationState Msgsize() is inaccurate")
	}

	vn := ReplicationState{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeReplicationState(b *testing.B) {
	v := ReplicationState{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeReplicationState(b *testing.B) {
	v := ReplicationState{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshalResyncDecision(t *testing.T) {
	v := ResyncDecision{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgResyncDecision(b *testing.B) {
	v := ResyncDecision{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgResyncDecision(b *testing.B) {
	v := ResyncDecision{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalResyncDecision(b *testing.B) {
	v := ResyncDecision{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeResyncDecision(t *testing.T) {
	v := ResyncDecision{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeResyncDecision Msgsize() is inaccurate")
	}

	vn := ResyncDecision{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeResyncDecision(b *testing.B) {
	v := ResyncDecision{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeResyncDecision(b *testing.B) {
	v := ResyncDecision{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshalResyncTarget(t *testing.T) {
	v := ResyncTarget{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgResyncTarget(b *testing.B) {
	v := ResyncTarget{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgResyncTarget(b *testing.B) {
	v := ResyncTarget{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalResyncTarget(b *testing.B) {
	v := ResyncTarget{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeResyncTarget(t *testing.T) {
	v := ResyncTarget{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeResyncTarget Msgsize() is inaccurate")
	}

	vn := ResyncTarget{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeResyncTarget(b *testing.B) {
	v := ResyncTarget{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeResyncTarget(b *testing.B) {
	v := ResyncTarget{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshalResyncTargetDecision(t *testing.T) {
	v := ResyncTargetDecision{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgResyncTargetDecision(b *testing.B) {
	v := ResyncTargetDecision{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgResyncTargetDecision(b *testing.B) {
	v := ResyncTargetDecision{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalResyncTargetDecision(b *testing.B) {
	v := ResyncTargetDecision{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeResyncTargetDecision(t *testing.T) {
	v := ResyncTargetDecision{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeResyncTargetDecision Msgsize() is inaccurate")
	}

	vn := ResyncTargetDecision{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeResyncTargetDecision(b *testing.B) {
	v := ResyncTargetDecision{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeResyncTargetDecision(b *testing.B) {
	v := ResyncTargetDecision{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshalResyncTargetsInfo(t *testing.T) {
	v := ResyncTargetsInfo{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgResyncTargetsInfo(b *testing.B) {
	v := ResyncTargetsInfo{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgResyncTargetsInfo(b *testing.B) {
	v := ResyncTargetsInfo{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalResyncTargetsInfo(b *testing.B) {
	v := ResyncTargetsInfo{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeResyncTargetsInfo(t *testing.T) {
	v := ResyncTargetsInfo{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeResyncTargetsInfo Msgsize() is inaccurate")
	}

	vn := ResyncTargetsInfo{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeResyncTargetsInfo(b *testing.B) {
	v := ResyncTargetsInfo{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeResyncTargetsInfo(b *testing.B) {
	v := ResyncTargetsInfo{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshalTargetReplicationResyncStatus(t *testing.T) {
	v := TargetReplicationResyncStatus{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgTargetReplicationResyncStatus(b *testing.B) {
	v := TargetReplicationResyncStatus{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgTargetReplicationResyncStatus(b *testing.B) {
	v := TargetReplicationResyncStatus{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalTargetReplicationResyncStatus(b *testing.B) {
	v := TargetReplicationResyncStatus{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeTargetReplicationResyncStatus(t *testing.T) {
	v := TargetReplicationResyncStatus{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeTargetReplicationResyncStatus Msgsize() is inaccurate")
	}

	vn := TargetReplicationResyncStatus{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeTargetReplicationResyncStatus(b *testing.B) {
	v := TargetReplicationResyncStatus{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeTargetReplicationResyncStatus(b *testing.B) {
	v := TargetReplicationResyncStatus{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}
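
Every type in the generated file above gets the same marshal/unmarshal/skip test. A hand-written equivalent (roundTrip is a hypothetical helper, not part of the generated output) would factor the shared invariant out as:

// roundTrip shows the invariant each generated test asserts: encode a value,
// decode it back, and verify no bytes are left over.
func roundTrip(t *testing.T, v interface {
	msgp.Marshaler
	msgp.Unmarshaler
}) {
	t.Helper()
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}
}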
cmd/bucket-replication-utils_test.go (new file, 247 lines)
@@ -0,0 +1,247 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"testing"

	"github.com/minio/minio/internal/bucket/replication"
)

var replicatedInfosTests = []struct {
	name                              string
	tgtInfos                          []replicatedTargetInfo
	expectedCompletedSize             int64
	expectedReplicationStatusInternal string
	expectedReplicationStatus         replication.StatusType
	expectedOpType                    replication.Type
	expectedAction                    replicationAction
}{
	{ // 1. empty tgtInfos slice
		name:                              "no replicated targets",
		tgtInfos:                          []replicatedTargetInfo{},
		expectedCompletedSize:             0,
		expectedReplicationStatusInternal: "",
		expectedReplicationStatus:         replication.StatusType(""),
		expectedOpType:                    replication.UnsetReplicationType,
		expectedAction:                    replicateNone,
	},
	{ // 2. replication completed to single target
		name: "replication completed to single target",
		tgtInfos: []replicatedTargetInfo{
			{
				Arn:                   "arn1",
				Size:                  249,
				PrevReplicationStatus: replication.Pending,
				ReplicationStatus:     replication.Completed,
				OpType:                replication.ObjectReplicationType,
				ReplicationAction:     replicateAll,
			},
		},
		expectedCompletedSize:             249,
		expectedReplicationStatusInternal: "arn1=COMPLETED;",
		expectedReplicationStatus:         replication.Completed,
		expectedOpType:                    replication.ObjectReplicationType,
		expectedAction:                    replicateAll,
	},
	{ // 3. replication completed to single target; failed to another
		name: "replication completed to single target",
		tgtInfos: []replicatedTargetInfo{
			{
				Arn:                   "arn1",
				Size:                  249,
				PrevReplicationStatus: replication.Pending,
				ReplicationStatus:     replication.Completed,
				OpType:                replication.ObjectReplicationType,
				ReplicationAction:     replicateAll,
			},
			{
				Arn:                   "arn2",
				Size:                  249,
				PrevReplicationStatus: replication.Pending,
				ReplicationStatus:     replication.Failed,
				OpType:                replication.ObjectReplicationType,
				ReplicationAction:     replicateAll,
			},
		},
		expectedCompletedSize:             249,
		expectedReplicationStatusInternal: "arn1=COMPLETED;arn2=FAILED;",
		expectedReplicationStatus:         replication.Failed,
		expectedOpType:                    replication.ObjectReplicationType,
		expectedAction:                    replicateAll,
	},
	{ // 4. replication pending on one target; failed to another
		name: "replication completed to single target",
		tgtInfos: []replicatedTargetInfo{
			{
				Arn:                   "arn1",
				Size:                  249,
				PrevReplicationStatus: replication.Pending,
				ReplicationStatus:     replication.Pending,
				OpType:                replication.ObjectReplicationType,
				ReplicationAction:     replicateAll,
			},
			{
				Arn:                   "arn2",
				Size:                  249,
				PrevReplicationStatus: replication.Pending,
				ReplicationStatus:     replication.Failed,
				OpType:                replication.ObjectReplicationType,
				ReplicationAction:     replicateAll,
			},
		},
		expectedCompletedSize:             0,
		expectedReplicationStatusInternal: "arn1=PENDING;arn2=FAILED;",
		expectedReplicationStatus:         replication.Failed,
		expectedOpType:                    replication.ObjectReplicationType,
		expectedAction:                    replicateAll,
	},
}

func TestReplicatedInfos(t *testing.T) {
	for i, test := range replicatedInfosTests {
		rinfos := replicatedInfos{
			Targets: test.tgtInfos,
		}
		if actualSize := rinfos.CompletedSize(); actualSize != test.expectedCompletedSize {
			t.Errorf("Test%d (%s): Size got %d , want %d", i+1, test.name, actualSize, test.expectedCompletedSize)
		}
		if repStatusStr := rinfos.ReplicationStatusInternal(); repStatusStr != test.expectedReplicationStatusInternal {
			t.Errorf("Test%d (%s): Internal replication status got %s , want %s", i+1, test.name, repStatusStr, test.expectedReplicationStatusInternal)
		}
		if repStatus := rinfos.ReplicationStatus(); repStatus != test.expectedReplicationStatus {
			t.Errorf("Test%d (%s): ReplicationStatus got %s , want %s", i+1, test.name, repStatus, test.expectedReplicationStatus)
		}
		if action := rinfos.Action(); action != test.expectedAction {
			t.Errorf("Test%d (%s): Action got %s , want %s", i+1, test.name, action, test.expectedAction)
		}
	}
}

var parseReplicationDecisionTest = []struct {
	name   string
	dsc    string
	expDsc ReplicateDecision
	expErr error
}{
	{ // 1.
		name: "empty string",
		dsc:  "",
		expDsc: ReplicateDecision{
			targetsMap: map[string]replicateTargetDecision{},
		},
		expErr: nil,
	},

	{ // 2.
		name:   "replicate decision for one target",
		dsc:    "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id",
		expErr: nil,
		expDsc: ReplicateDecision{
			targetsMap: map[string]replicateTargetDecision{
				"arn:minio:replication::id:bucket": newReplicateTargetDecision("arn:minio:replication::id:bucket", true, false),
			},
		},
	},
	{ // 3.
		name:   "replicate decision for multiple targets",
		dsc:    "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id,arn:minio:replication::id2:bucket=false;true;arn:minio:replication::id2:bucket;id2",
		expErr: nil,
		expDsc: ReplicateDecision{
			targetsMap: map[string]replicateTargetDecision{
				"arn:minio:replication::id:bucket":  newReplicateTargetDecision("arn:minio:replication::id:bucket", true, false),
				"arn:minio:replication::id2:bucket": newReplicateTargetDecision("arn:minio:replication::id2:bucket", false, true),
			},
		},
	},
	{ // 4.
		name:   "invalid format replicate decision for one target",
		dsc:    "arn:minio:replication::id:bucket:true;false;arn:minio:replication::id:bucket;id",
		expErr: errInvalidReplicateDecisionFormat,
		expDsc: ReplicateDecision{
			targetsMap: map[string]replicateTargetDecision{
				"arn:minio:replication::id:bucket": newReplicateTargetDecision("arn:minio:replication::id:bucket", true, false),
			},
		},
	},
}

func TestParseReplicateDecision(t *testing.T) {
	for i, test := range parseReplicationDecisionTest {
		dsc, err := parseReplicateDecision(test.expDsc.String())
		if err != nil {
			if test.expErr != err {
				t.Errorf("Test%d (%s): Expected parse error got %v , want %v", i+1, test.name, err, test.expErr)
			}
			continue
		}
		if len(dsc.targetsMap) != len(test.expDsc.targetsMap) {
			t.Errorf("Test%d (%s): Invalid number of entries in targetsMap got %d , want %d", i+1, test.name, len(dsc.targetsMap), len(test.expDsc.targetsMap))
		}
		for arn, tdsc := range dsc.targetsMap {
			expDsc, ok := test.expDsc.targetsMap[arn]
			if !ok || expDsc != tdsc {
				t.Errorf("Test%d (%s): Invalid target replicate decision: got %+v, want %+v", i+1, test.name, tdsc, expDsc)
			}
		}
	}
}

var replicationStateTest = []struct {
	name      string
	rs        ReplicationState
	arn       string
	expStatus replication.StatusType
}{
	{ // 1. no replication status header
		name:      "no replicated targets",
		rs:        ReplicationState{},
		expStatus: replication.StatusType(""),
	},
	{ // 2. replication status for one target
		name:      "replication status for one target",
		rs:        ReplicationState{ReplicationStatusInternal: "arn1=PENDING;", Targets: map[string]replication.StatusType{"arn1": "PENDING"}},
		expStatus: replication.Pending,
	},
	{ // 3. replication status for one target - incorrect format
		name:      "replication status for one target",
		rs:        ReplicationState{ReplicationStatusInternal: "arn1=PENDING"},
		expStatus: replication.StatusType(""),
	},
	{ // 4. replication status for 3 targets, one of them failed
		name: "replication status for 3 targets - one failed",
		rs: ReplicationState{
			ReplicationStatusInternal: "arn1=COMPLETED;arn2=COMPLETED;arn3=FAILED;",
			Targets:                   map[string]replication.StatusType{"arn1": "COMPLETED", "arn2": "COMPLETED", "arn3": "FAILED"},
		},
		expStatus: replication.Failed,
	},
	{ // 5. replication status for replica version
		name:      "replication status for replica version",
		rs:        ReplicationState{ReplicationStatusInternal: string(replication.Replica)},
		expStatus: replication.Replica,
	},
}

func TestCompositeReplicationStatus(t *testing.T) {
	for i, test := range replicationStateTest {
		if rstatus := test.rs.CompositeReplicationStatus(); rstatus != test.expStatus {
			t.Errorf("Test%d (%s): Overall replication status got %s , want %s", i+1, test.name, rstatus, test.expStatus)
		}
	}
}
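
The tests above build internal status strings of the form "arn=STATUS;" per target, which the composite status is derived from. As a hedged sketch of that decomposition (splitReplicationStatuses is hypothetical; the real parser lives in the suppressed bucket-replication-utils.go diff, and this fragment assumes the "strings" import):

// splitReplicationStatuses decomposes "arn1=COMPLETED;arn2=FAILED;" into a
// per-target map, mirroring the format the tests above construct.
func splitReplicationStatuses(internal string) map[string]string {
	m := make(map[string]string)
	for _, pair := range strings.Split(internal, ";") {
		if arn, status, ok := strings.Cut(pair, "="); ok {
			m[arn] = status
		}
	}
	return m
}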
File diff suppressed because it is too large
@@ -24,6 +24,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/minio/madmin-go"
 	"github.com/minio/minio/internal/bucket/replication"
 	xhttp "github.com/minio/minio/internal/http"
 )
@@ -50,29 +51,32 @@ var replicationConfigTests = []struct {
 	info         ObjectInfo
 	name         string
 	rcfg         replicationConfig
+	dsc          ReplicateDecision
+	tgtStatuses  map[string]replication.StatusType
 	expectedSync bool
 }{
-	{ //1. no replication config
+	{ // 1. no replication config
 		name:         "no replication config",
 		info:         ObjectInfo{Size: 100},
 		rcfg:         replicationConfig{Config: nil},
 		expectedSync: false,
 	},
-	{ //2. existing object replication config enabled, no versioning
+	{ // 2. existing object replication config enabled, no versioning
 		name:         "existing object replication config enabled, no versioning",
 		info:         ObjectInfo{Size: 100},
 		rcfg:         replicationConfig{Config: &configs[0]},
 		expectedSync: false,
 	},
-	{ //3. existing object replication config enabled, versioning suspended
+	{ // 3. existing object replication config enabled, versioning suspended
 		name:         "existing object replication config enabled, versioning suspended",
 		info:         ObjectInfo{Size: 100, VersionID: nullVersionID},
 		rcfg:         replicationConfig{Config: &configs[0]},
 		expectedSync: false,
 	},
-	{ //4. existing object replication enabled, versioning enabled; no reset in progress
+	{ // 4. existing object replication enabled, versioning enabled; no reset in progress
 		name: "existing object replication enabled, versioning enabled; no reset in progress",
-		info: ObjectInfo{Size: 100,
+		info: ObjectInfo{
+			Size:              100,
 			ReplicationStatus: replication.Completed,
 			VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
 		},
@@ -84,126 +88,203 @@ var replicationConfigTests = []struct {
 func TestReplicationResync(t *testing.T) {
 	ctx := context.Background()
 	for i, test := range replicationConfigTests {
-		if sync := test.rcfg.Resync(ctx, test.info); sync != test.expectedSync {
-			t.Errorf("Test%d (%s): Resync got %t , want %t", i+1, test.name, sync, test.expectedSync)
+		if sync := test.rcfg.Resync(ctx, test.info, &test.dsc, test.tgtStatuses); sync.mustResync() != test.expectedSync {
+			t.Errorf("Test%d (%s): Resync got %t , want %t", i+1, test.name, sync.mustResync(), test.expectedSync)
 		}
 	}
 }
 
-var start = UTCNow().AddDate(0, 0, -1)
-var replicationConfigTests2 = []struct {
-	info         ObjectInfo
-	name         string
-	replicate    bool
-	rcfg         replicationConfig
-	expectedSync bool
-}{
-	{ // Cases 1-4: existing object replication enabled, versioning enabled, no reset - replication status varies
-		// 1: Pending replication
-		name: "existing object replication on object in Pending replication status",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatus: replication.Pending,
-			VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
-		},
-		replicate:    true,
-		expectedSync: true,
-	},
-	{ // 2. replication status Failed
-		name: "existing object replication on object in Failed replication status",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatus: replication.Failed,
-			VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
-		},
-		replicate:    true,
-		expectedSync: true,
-	},
-	{ //3. replication status unset
-		name: "existing object replication on pre-existing unreplicated object",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatus: replication.StatusType(""),
-			VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
-		},
-		replicate:    true,
-		expectedSync: true,
-	},
-	{ //4. replication status Complete
-		name: "existing object replication on object in Completed replication status",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatus: replication.Completed,
-			VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
-		},
-		replicate:    true,
-		expectedSync: false,
-	},
-	{ //5. existing object replication enabled, versioning enabled, replication status Pending & reset ID present
-		name: "existing object replication with reset in progress and object in Pending status",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatus: replication.Pending,
-			VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
-			UserDefined:       map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
-		},
-		replicate:    true,
-		expectedSync: true,
-		rcfg:         replicationConfig{ResetID: "xyz", ResetBeforeDate: UTCNow()},
-	},
-	{ //6. existing object replication enabled, versioning enabled, replication status Failed & reset ID present
-		name: "existing object replication with reset in progress and object in Failed status",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatus: replication.Failed,
-			VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
-			UserDefined:       map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
-		},
-		replicate:    true,
-		expectedSync: true,
-		rcfg:         replicationConfig{ResetID: "xyz", ResetBeforeDate: UTCNow()},
-	},
-	{ //7. existing object replication enabled, versioning enabled, replication status unset & reset ID present
-		name: "existing object replication with reset in progress and object never replicated before",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatus: replication.StatusType(""),
-			VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
-			UserDefined:       map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
-		},
-		replicate:    true,
-		expectedSync: true,
-		rcfg:         replicationConfig{ResetID: "xyz", ResetBeforeDate: UTCNow()},
-	},
-	{ //8. existing object replication enabled, versioning enabled, replication status Complete & reset ID present
-		name: "existing object replication enabled - reset in progress for an object in Completed status",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatus: replication.Completed,
-			VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df8",
-			UserDefined:       map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
-		},
-		replicate:    true,
-		expectedSync: true,
-		rcfg:         replicationConfig{ResetID: "xyz", ResetBeforeDate: UTCNow()},
-	},
-	{ //9. existing object replication enabled, versioning enabled, replication status Pending & reset ID different
-		name: "existing object replication enabled, newer reset in progress on object in Pending replication status",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatus: replication.Pending,
-			VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
-			UserDefined:       map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", UTCNow().Format(http.TimeFormat), "xyz")},
-			ModTime:           UTCNow().AddDate(0, 0, -1),
-		},
-		replicate:    true,
-		expectedSync: true,
-		rcfg:         replicationConfig{ResetID: "abc", ResetBeforeDate: UTCNow()},
-	},
-	{ //10. existing object replication enabled, versioning enabled, replication status Complete & reset done
-		name: "reset done on object in Completed Status - ineligible for re-replication",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatus: replication.Completed,
-			VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
-			UserDefined:       map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", start.Format(http.TimeFormat), "xyz")},
-		},
-		replicate:    true,
-		expectedSync: false,
-		rcfg:         replicationConfig{ResetID: "xyz", ResetBeforeDate: UTCNow()},
-	},
-}
+var (
+	start                   = UTCNow().AddDate(0, 0, -1)
+	replicationConfigTests2 = []struct {
+		info         ObjectInfo
+		name         string
+		rcfg         replicationConfig
+		dsc          ReplicateDecision
+		tgtStatuses  map[string]replication.StatusType
+		expectedSync bool
+	}{
+		{ // Cases 1-4: existing object replication enabled, versioning enabled, no reset - replication status varies
+			// 1: Pending replication
+			name: "existing object replication on object in Pending replication status",
+			info: ObjectInfo{
+				Size:                      100,
+				ReplicationStatusInternal: "arn1:PENDING;",
+				ReplicationStatus:         replication.Pending,
+				VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
+			},
+			rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+				Arn: "arn1",
+			}}}},
+			dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			expectedSync: true,
+		},
+		{ // 2. replication status Failed
+			name: "existing object replication on object in Failed replication status",
+			info: ObjectInfo{
+				Size:                      100,
+				ReplicationStatusInternal: "arn1:FAILED",
+				ReplicationStatus:         replication.Failed,
+				VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
+			},
+			dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+				Arn: "arn1",
+			}}}},
+			expectedSync: true,
+		},
+		{ // 3. replication status unset
+			name: "existing object replication on pre-existing unreplicated object",
+			info: ObjectInfo{
+				Size:              100,
+				ReplicationStatus: replication.StatusType(""),
+				VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
+			},
+			rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+				Arn: "arn1",
+			}}}},
+			dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			expectedSync: true,
+		},
+		{ // 4. replication status Complete
+			name: "existing object replication on object in Completed replication status",
+			info: ObjectInfo{
+				Size:                      100,
+				ReplicationStatusInternal: "arn1:COMPLETED",
+				ReplicationStatus:         replication.Completed,
+				VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
+			},
+			dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", false, false)}},
+			rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+				Arn: "arn1",
+			}}}},
+			expectedSync: false,
+		},
+		{ // 5. existing object replication enabled, versioning enabled, replication status Pending & reset ID present
+			name: "existing object replication with reset in progress and object in Pending status",
+			info: ObjectInfo{
+				Size:                      100,
+				ReplicationStatusInternal: "arn1:PENDING;",
+				ReplicationStatus:         replication.Pending,
+				VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
+				UserDefined:               map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
+			},
+			expectedSync: true,
+			dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			rcfg: replicationConfig{
+				remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+					Arn:             "arn1",
+					ResetID:         "xyz",
+					ResetBeforeDate: UTCNow(),
+				}}},
+			},
+		},
+		{ // 6. existing object replication enabled, versioning enabled, replication status Failed & reset ID present
+			name: "existing object replication with reset in progress and object in Failed status",
+			info: ObjectInfo{
+				Size:                      100,
+				ReplicationStatusInternal: "arn1:FAILED;",
+				ReplicationStatus:         replication.Failed,
+				VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
+				UserDefined:               map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
+			},
+			dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			rcfg: replicationConfig{
+				remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+					Arn:             "arn1",
+					ResetID:         "xyz",
+					ResetBeforeDate: UTCNow(),
+				}}},
+			},
+			expectedSync: true,
+		},
+		{ // 7. existing object replication enabled, versioning enabled, replication status unset & reset ID present
+			name: "existing object replication with reset in progress and object never replicated before",
+			info: ObjectInfo{
+				Size:              100,
+				ReplicationStatus: replication.StatusType(""),
+				VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
+				UserDefined:       map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
+			},
+			dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			rcfg: replicationConfig{
+				remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+					Arn:             "arn1",
+					ResetID:         "xyz",
+					ResetBeforeDate: UTCNow(),
+				}}},
+			},
+			expectedSync: true,
+		},
+		{ // 8. existing object replication enabled, versioning enabled, replication status Complete & reset ID present
+			name: "existing object replication enabled - reset in progress for an object in Completed status",
+			info: ObjectInfo{
+				Size:                      100,
+				ReplicationStatusInternal: "arn1:COMPLETED;",
+				ReplicationStatus:         replication.Completed,
+				VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df8",
+				UserDefined:               map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
+			},
+			expectedSync: true,
+			dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			rcfg: replicationConfig{
+				remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+					Arn:             "arn1",
+					ResetID:         "xyz",
+					ResetBeforeDate: UTCNow(),
+				}}},
+			},
+		},
+		{ // 9. existing object replication enabled, versioning enabled, replication status Pending & reset ID different
+			name: "existing object replication enabled, newer reset in progress on object in Pending replication status",
+			info: ObjectInfo{
+				Size:                      100,
+				ReplicationStatusInternal: "arn1:PENDING;",
+				ReplicationStatus:         replication.Pending,
+				VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
+				UserDefined:               map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", UTCNow().AddDate(0, 0, -1).Format(http.TimeFormat), "abc")},
+				ModTime:                   UTCNow().AddDate(0, 0, -2),
+			},
+			expectedSync: true,
+			dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			rcfg: replicationConfig{
+				remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+					Arn:             "arn1",
+					ResetID:         "xyz",
+					ResetBeforeDate: UTCNow(),
+				}}},
+			},
+		},
+		{ // 10. existing object replication enabled, versioning enabled, replication status Complete & reset done
+			name: "reset done on object in Completed Status - ineligible for re-replication",
+			info: ObjectInfo{
+				Size:                      100,
+				ReplicationStatusInternal: "arn1:COMPLETED;",
+				ReplicationStatus:         replication.Completed,
+				VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
+				UserDefined:               map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", start.Format(http.TimeFormat), "xyz")},
+			},
+			expectedSync: false,
+			dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			rcfg: replicationConfig{
+				remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+					Arn:             "arn1",
+					ResetID:         "xyz",
+					ResetBeforeDate: start,
+				}}},
+			},
+		},
+	}
+)
 
 func TestReplicationResyncwrapper(t *testing.T) {
 	for i, test := range replicationConfigTests2 {
-		if sync := test.rcfg.resync(test.info, test.replicate); sync != test.expectedSync {
-			t.Errorf("%s (%s): Replicationresync got %t , want %t", fmt.Sprintf("Test%d - %s", i+1, time.Now().Format(http.TimeFormat)), test.name, sync, test.expectedSync)
+		if sync := test.rcfg.resync(test.info, &test.dsc, test.tgtStatuses); sync.mustResync() != test.expectedSync {
+			t.Errorf("%s (%s): Replicationresync got %t , want %t", fmt.Sprintf("Test%d - %s", i+1, time.Now().Format(http.TimeFormat)), test.name, sync.mustResync(), test.expectedSync)
 		}
 	}
 }
 
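The behavioural change in the hunk above: resync() now returns a per-target decision with a mustResync() aggregate instead of a single bool. A hedged sketch of the shape those call sites imply (the type below is hypothetical; the real ResyncDecision lives in the suppressed bucket-replication-utils.go diff):

// resyncDecisionSketch mimics the aggregate the new call sites rely on:
// resync is required if at least one remote target must resync.
type resyncDecisionSketch struct {
	targets map[string]bool // arn -> should this target resync?
}

func (r resyncDecisionSketch) mustResync() bool {
	for _, yes := range r.targets {
		if yes {
			return true
		}
	}
	return false
}
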
@@ -17,8 +17,40 @@
 
 package cmd
 
 import (
 	"time"
 )
 
+//go:generate msgp -file $GOFILE
+
+// ReplicationLatency holds information of bucket operations latency, such as uploads
+type ReplicationLatency struct {
+	// Single & Multipart PUTs latency
+	UploadHistogram LastMinuteLatencies
+}
+
+// Merge two replication latency into a new one
+func (rl ReplicationLatency) merge(other ReplicationLatency) (newReplLatency ReplicationLatency) {
+	newReplLatency.UploadHistogram = rl.UploadHistogram.Merge(other.UploadHistogram)
+	return
+}
+
+// Get upload latency of each object size range
+func (rl ReplicationLatency) getUploadLatency() (ret map[string]uint64) {
+	ret = make(map[string]uint64)
+	avg := rl.UploadHistogram.GetAvgData()
+	for k, v := range avg {
+		// Convert nanoseconds to milliseconds
+		ret[sizeTagToString(k)] = v.avg() / uint64(time.Millisecond)
+	}
+	return
+}
+
+// Update replication upload latency with a new value
+func (rl *ReplicationLatency) update(size int64, duration time.Duration) {
+	rl.UploadHistogram.Add(size, duration)
+}
+
 // BucketStats bucket statistics
 type BucketStats struct {
 	ReplicationStats BucketReplicationStats
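To see how the new latency type is meant to be fed and read, here is a hedged usage sketch (LastMinuteLatencies and sizeTagToString are existing internals assumed to behave as their names suggest):

// Hypothetical usage of the ReplicationLatency methods added above.
var rl ReplicationLatency
rl.update(1<<20, 250*time.Millisecond) // record a 1 MiB upload that took 250ms
for sizeRange, ms := range rl.getUploadLatency() {
	fmt.Printf("%s: avg %dms\n", sizeRange, ms) // per size-range averages, in milliseconds
}
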
@@ -27,16 +59,67 @@ type BucketStats struct {
|
||||
// BucketReplicationStats represents inline replication statistics
|
||||
// such as pending, failed and completed bytes in total for a bucket
|
||||
type BucketReplicationStats struct {
|
||||
Stats map[string]*BucketReplicationStat
|
||||
// Pending size in bytes
|
||||
PendingSize uint64 `json:"pendingReplicationSize"`
|
||||
PendingSize int64 `json:"pendingReplicationSize"`
|
||||
// Completed size in bytes
|
||||
ReplicatedSize uint64 `json:"completedReplicationSize"`
|
||||
ReplicatedSize int64 `json:"completedReplicationSize"`
|
||||
// Total Replica size in bytes
|
||||
ReplicaSize uint64 `json:"replicaSize"`
|
||||
ReplicaSize int64 `json:"replicaSize"`
|
||||
// Failed size in bytes
|
||||
FailedSize uint64 `json:"failedReplicationSize"`
|
||||
FailedSize int64 `json:"failedReplicationSize"`
|
||||
// Total number of pending operations including metadata updates
|
||||
PendingCount uint64 `json:"pendingReplicationCount"`
|
||||
PendingCount int64 `json:"pendingReplicationCount"`
|
||||
// Total number of failed operations including metadata updates
|
||||
FailedCount uint64 `json:"failedReplicationCount"`
|
||||
FailedCount int64 `json:"failedReplicationCount"`
|
||||
}
|
||||
|
||||
// Empty returns true if there are no target stats
|
||||
func (brs *BucketReplicationStats) Empty() bool {
|
||||
return len(brs.Stats) == 0 && brs.ReplicaSize == 0
|
||||
}
|
||||
|
||||
// Clone creates a new BucketReplicationStats copy
|
||||
func (brs BucketReplicationStats) Clone() (c BucketReplicationStats) {
|
||||
// This is called only by replicationStats cache and already holds a
|
||||
// read lock before calling Clone()
|
||||
|
||||
c = brs
|
||||
// We need to copy the map, so we do not reference the one in `brs`.
|
||||
c.Stats = make(map[string]*BucketReplicationStat, len(brs.Stats))
|
||||
for arn, st := range brs.Stats {
|
||||
// make a copy of `*st`
|
||||
s := *st
|
||||
c.Stats[arn] = &s
|
||||
}
|
||||
return c
|
||||
}

// BucketReplicationStat represents inline replication statistics
// such as pending, failed and completed bytes in total for a bucket
// remote target
type BucketReplicationStat struct {
	// Pending size in bytes
	PendingSize int64 `json:"pendingReplicationSize"`
	// Completed size in bytes
	ReplicatedSize int64 `json:"completedReplicationSize"`
	// Total Replica size in bytes
	ReplicaSize int64 `json:"replicaSize"`
	// Failed size in bytes
	FailedSize int64 `json:"failedReplicationSize"`
	// Total number of pending operations including metadata updates
	PendingCount int64 `json:"pendingReplicationCount"`
	// Total number of failed operations including metadata updates
	FailedCount int64 `json:"failedReplicationCount"`
	// Replication latency information
	Latency ReplicationLatency `json:"replicationLatency"`
}

func (bs *BucketReplicationStat) hasReplicationUsage() bool {
	return bs.FailedSize > 0 ||
		bs.ReplicatedSize > 0 ||
		bs.ReplicaSize > 0 ||
		bs.FailedCount > 0 ||
		bs.PendingCount > 0 ||
		bs.PendingSize > 0
}

@@ -6,6 +6,318 @@ import (
	"github.com/tinylib/msgp/msgp"
)

// DecodeMsg implements msgp.Decodable
func (z *BucketReplicationStat) DecodeMsg(dc *msgp.Reader) (err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, err = dc.ReadMapHeader()
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, err = dc.ReadMapKeyPtr()
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "PendingSize":
			z.PendingSize, err = dc.ReadInt64()
			if err != nil {
				err = msgp.WrapError(err, "PendingSize")
				return
			}
		case "ReplicatedSize":
			z.ReplicatedSize, err = dc.ReadInt64()
			if err != nil {
				err = msgp.WrapError(err, "ReplicatedSize")
				return
			}
		case "ReplicaSize":
			z.ReplicaSize, err = dc.ReadInt64()
			if err != nil {
				err = msgp.WrapError(err, "ReplicaSize")
				return
			}
		case "FailedSize":
			z.FailedSize, err = dc.ReadInt64()
			if err != nil {
				err = msgp.WrapError(err, "FailedSize")
				return
			}
		case "PendingCount":
			z.PendingCount, err = dc.ReadInt64()
			if err != nil {
				err = msgp.WrapError(err, "PendingCount")
				return
			}
		case "FailedCount":
			z.FailedCount, err = dc.ReadInt64()
			if err != nil {
				err = msgp.WrapError(err, "FailedCount")
				return
			}
		case "Latency":
			var zb0002 uint32
			zb0002, err = dc.ReadMapHeader()
			if err != nil {
				err = msgp.WrapError(err, "Latency")
				return
			}
			for zb0002 > 0 {
				zb0002--
				field, err = dc.ReadMapKeyPtr()
				if err != nil {
					err = msgp.WrapError(err, "Latency")
					return
				}
				switch msgp.UnsafeString(field) {
				case "UploadHistogram":
					err = z.Latency.UploadHistogram.DecodeMsg(dc)
					if err != nil {
						err = msgp.WrapError(err, "Latency", "UploadHistogram")
						return
					}
				default:
					err = dc.Skip()
					if err != nil {
						err = msgp.WrapError(err, "Latency")
						return
					}
				}
			}
		default:
			err = dc.Skip()
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	return
}

// EncodeMsg implements msgp.Encodable
func (z *BucketReplicationStat) EncodeMsg(en *msgp.Writer) (err error) {
	// map header, size 7
	// write "PendingSize"
	err = en.Append(0x87, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
	if err != nil {
		return
	}
	err = en.WriteInt64(z.PendingSize)
	if err != nil {
		err = msgp.WrapError(err, "PendingSize")
		return
	}
	// write "ReplicatedSize"
	err = en.Append(0xae, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
	if err != nil {
		return
	}
	err = en.WriteInt64(z.ReplicatedSize)
	if err != nil {
		err = msgp.WrapError(err, "ReplicatedSize")
		return
	}
	// write "ReplicaSize"
	err = en.Append(0xab, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x69, 0x7a, 0x65)
	if err != nil {
		return
	}
	err = en.WriteInt64(z.ReplicaSize)
	if err != nil {
		err = msgp.WrapError(err, "ReplicaSize")
		return
	}
	// write "FailedSize"
	err = en.Append(0xaa, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
	if err != nil {
		return
	}
	err = en.WriteInt64(z.FailedSize)
	if err != nil {
		err = msgp.WrapError(err, "FailedSize")
		return
	}
	// write "PendingCount"
	err = en.Append(0xac, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x75, 0x6e, 0x74)
	if err != nil {
		return
	}
	err = en.WriteInt64(z.PendingCount)
	if err != nil {
		err = msgp.WrapError(err, "PendingCount")
		return
	}
	// write "FailedCount"
	err = en.Append(0xab, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74)
	if err != nil {
		return
	}
	err = en.WriteInt64(z.FailedCount)
	if err != nil {
		err = msgp.WrapError(err, "FailedCount")
		return
	}
	// write "Latency"
	err = en.Append(0xa7, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79)
	if err != nil {
		return
	}
	// map header, size 1
	// write "UploadHistogram"
	err = en.Append(0x81, 0xaf, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d)
	if err != nil {
		return
	}
	err = z.Latency.UploadHistogram.EncodeMsg(en)
	if err != nil {
		err = msgp.WrapError(err, "Latency", "UploadHistogram")
		return
	}
	return
}

// MarshalMsg implements msgp.Marshaler
func (z *BucketReplicationStat) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 7
	// string "PendingSize"
	o = append(o, 0x87, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
	o = msgp.AppendInt64(o, z.PendingSize)
	// string "ReplicatedSize"
	o = append(o, 0xae, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
	o = msgp.AppendInt64(o, z.ReplicatedSize)
	// string "ReplicaSize"
	o = append(o, 0xab, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x69, 0x7a, 0x65)
	o = msgp.AppendInt64(o, z.ReplicaSize)
	// string "FailedSize"
	o = append(o, 0xaa, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
	o = msgp.AppendInt64(o, z.FailedSize)
	// string "PendingCount"
	o = append(o, 0xac, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x75, 0x6e, 0x74)
	o = msgp.AppendInt64(o, z.PendingCount)
	// string "FailedCount"
	o = append(o, 0xab, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74)
	o = msgp.AppendInt64(o, z.FailedCount)
	// string "Latency"
	o = append(o, 0xa7, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79)
	// map header, size 1
	// string "UploadHistogram"
	o = append(o, 0x81, 0xaf, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d)
	o, err = z.Latency.UploadHistogram.MarshalMsg(o)
	if err != nil {
		err = msgp.WrapError(err, "Latency", "UploadHistogram")
		return
	}
	return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *BucketReplicationStat) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "PendingSize":
			z.PendingSize, bts, err = msgp.ReadInt64Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "PendingSize")
				return
			}
		case "ReplicatedSize":
			z.ReplicatedSize, bts, err = msgp.ReadInt64Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "ReplicatedSize")
				return
			}
		case "ReplicaSize":
			z.ReplicaSize, bts, err = msgp.ReadInt64Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "ReplicaSize")
				return
			}
		case "FailedSize":
			z.FailedSize, bts, err = msgp.ReadInt64Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "FailedSize")
				return
			}
		case "PendingCount":
			z.PendingCount, bts, err = msgp.ReadInt64Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "PendingCount")
				return
			}
		case "FailedCount":
			z.FailedCount, bts, err = msgp.ReadInt64Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "FailedCount")
				return
			}
		case "Latency":
			var zb0002 uint32
			zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Latency")
				return
			}
			for zb0002 > 0 {
				zb0002--
				field, bts, err = msgp.ReadMapKeyZC(bts)
				if err != nil {
					err = msgp.WrapError(err, "Latency")
					return
				}
				switch msgp.UnsafeString(field) {
				case "UploadHistogram":
					bts, err = z.Latency.UploadHistogram.UnmarshalMsg(bts)
					if err != nil {
						err = msgp.WrapError(err, "Latency", "UploadHistogram")
						return
					}
				default:
					bts, err = msgp.Skip(bts)
					if err != nil {
						err = msgp.WrapError(err, "Latency")
						return
					}
				}
			}
		default:
			bts, err = msgp.Skip(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	o = bts
	return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketReplicationStat) Msgsize() (s int) {
	s = 1 + 12 + msgp.Int64Size + 15 + msgp.Int64Size + 12 + msgp.Int64Size + 11 + msgp.Int64Size + 13 + msgp.Int64Size + 12 + msgp.Int64Size + 8 + 1 + 16 + z.Latency.UploadHistogram.Msgsize()
	return
}
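
The constants in Msgsize follow directly from the MessagePack wire layout: one byte for the fixmap header, then for each field one fixstr header byte plus the key's length plus the value's maximum size. For instance, 12 is 1 + len("PendingSize") = 1 + 11, and the trailing 8 + 1 + 16 covers the "Latency" key, its nested one-entry fixmap header, and the "UploadHistogram" key. A sketch recomputing the same bound by hand (illustrative only, not generated code; assumes the tinylib/msgp import used above):

	func msgsizeByHand() int {
		keys := []string{"PendingSize", "ReplicatedSize", "ReplicaSize", "FailedSize", "PendingCount", "FailedCount"}
		s := 1 // one-byte fixmap header covering all 7 entries
		for _, k := range keys {
			s += 1 + len(k) + msgp.Int64Size // fixstr header + key bytes + int64 upper bound
		}
		// "Latency" key, its nested one-entry fixmap header, and the "UploadHistogram" key;
		// the histogram payload itself adds UploadHistogram.Msgsize() on top.
		s += 1 + len("Latency") + 1 + 1 + len("UploadHistogram")
		return s
	}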

// DecodeMsg implements msgp.Decodable
func (z *BucketReplicationStats) DecodeMsg(dc *msgp.Reader) (err error) {
	var field []byte
@@ -24,38 +336,80 @@ func (z *BucketReplicationStats) DecodeMsg(dc *msgp.Reader) (err error) {
			return
		}
		switch msgp.UnsafeString(field) {
		case "Stats":
			var zb0002 uint32
			zb0002, err = dc.ReadMapHeader()
			if err != nil {
				err = msgp.WrapError(err, "Stats")
				return
			}
			if z.Stats == nil {
				z.Stats = make(map[string]*BucketReplicationStat, zb0002)
			} else if len(z.Stats) > 0 {
				for key := range z.Stats {
					delete(z.Stats, key)
				}
			}
			for zb0002 > 0 {
				zb0002--
				var za0001 string
				var za0002 *BucketReplicationStat
				za0001, err = dc.ReadString()
				if err != nil {
					err = msgp.WrapError(err, "Stats")
					return
				}
				if dc.IsNil() {
					err = dc.ReadNil()
					if err != nil {
						err = msgp.WrapError(err, "Stats", za0001)
						return
					}
					za0002 = nil
				} else {
					if za0002 == nil {
						za0002 = new(BucketReplicationStat)
					}
					err = za0002.DecodeMsg(dc)
					if err != nil {
						err = msgp.WrapError(err, "Stats", za0001)
						return
					}
				}
				z.Stats[za0001] = za0002
			}
		case "PendingSize":
-			z.PendingSize, err = dc.ReadUint64()
+			z.PendingSize, err = dc.ReadInt64()
			if err != nil {
				err = msgp.WrapError(err, "PendingSize")
				return
			}
		case "ReplicatedSize":
-			z.ReplicatedSize, err = dc.ReadUint64()
+			z.ReplicatedSize, err = dc.ReadInt64()
			if err != nil {
				err = msgp.WrapError(err, "ReplicatedSize")
				return
			}
		case "ReplicaSize":
-			z.ReplicaSize, err = dc.ReadUint64()
+			z.ReplicaSize, err = dc.ReadInt64()
			if err != nil {
				err = msgp.WrapError(err, "ReplicaSize")
				return
			}
		case "FailedSize":
-			z.FailedSize, err = dc.ReadUint64()
+			z.FailedSize, err = dc.ReadInt64()
			if err != nil {
				err = msgp.WrapError(err, "FailedSize")
				return
			}
		case "PendingCount":
-			z.PendingCount, err = dc.ReadUint64()
+			z.PendingCount, err = dc.ReadInt64()
			if err != nil {
				err = msgp.WrapError(err, "PendingCount")
				return
			}
		case "FailedCount":
-			z.FailedCount, err = dc.ReadUint64()
+			z.FailedCount, err = dc.ReadInt64()
			if err != nil {
				err = msgp.WrapError(err, "FailedCount")
				return
@@ -73,13 +427,42 @@ func (z *BucketReplicationStats) DecodeMsg(dc *msgp.Reader) (err error) {

// EncodeMsg implements msgp.Encodable
func (z *BucketReplicationStats) EncodeMsg(en *msgp.Writer) (err error) {
-	// map header, size 6
-	// write "PendingSize"
-	err = en.Append(0x86, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
+	// map header, size 7
+	// write "Stats"
+	err = en.Append(0x87, 0xa5, 0x53, 0x74, 0x61, 0x74, 0x73)
	if err != nil {
		return
	}
-	err = en.WriteUint64(z.PendingSize)
+	err = en.WriteMapHeader(uint32(len(z.Stats)))
	if err != nil {
		err = msgp.WrapError(err, "Stats")
		return
	}
	for za0001, za0002 := range z.Stats {
		err = en.WriteString(za0001)
		if err != nil {
			err = msgp.WrapError(err, "Stats")
			return
		}
		if za0002 == nil {
			err = en.WriteNil()
			if err != nil {
				return
			}
		} else {
			err = za0002.EncodeMsg(en)
			if err != nil {
				err = msgp.WrapError(err, "Stats", za0001)
				return
			}
		}
	}
	// write "PendingSize"
	err = en.Append(0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
	if err != nil {
		return
	}
	err = en.WriteInt64(z.PendingSize)
	if err != nil {
		err = msgp.WrapError(err, "PendingSize")
		return
@@ -89,7 +472,7 @@ func (z *BucketReplicationStats) EncodeMsg(en *msgp.Writer) (err error) {
	if err != nil {
		return
	}
-	err = en.WriteUint64(z.ReplicatedSize)
+	err = en.WriteInt64(z.ReplicatedSize)
	if err != nil {
		err = msgp.WrapError(err, "ReplicatedSize")
		return
@@ -99,7 +482,7 @@ func (z *BucketReplicationStats) EncodeMsg(en *msgp.Writer) (err error) {
	if err != nil {
		return
	}
-	err = en.WriteUint64(z.ReplicaSize)
+	err = en.WriteInt64(z.ReplicaSize)
	if err != nil {
		err = msgp.WrapError(err, "ReplicaSize")
		return
@@ -109,7 +492,7 @@ func (z *BucketReplicationStats) EncodeMsg(en *msgp.Writer) (err error) {
	if err != nil {
		return
	}
-	err = en.WriteUint64(z.FailedSize)
+	err = en.WriteInt64(z.FailedSize)
	if err != nil {
		err = msgp.WrapError(err, "FailedSize")
		return
@@ -119,7 +502,7 @@ func (z *BucketReplicationStats) EncodeMsg(en *msgp.Writer) (err error) {
	if err != nil {
		return
	}
-	err = en.WriteUint64(z.PendingCount)
+	err = en.WriteInt64(z.PendingCount)
	if err != nil {
		err = msgp.WrapError(err, "PendingCount")
		return
@@ -129,7 +512,7 @@ func (z *BucketReplicationStats) EncodeMsg(en *msgp.Writer) (err error) {
	if err != nil {
		return
	}
-	err = en.WriteUint64(z.FailedCount)
+	err = en.WriteInt64(z.FailedCount)
	if err != nil {
		err = msgp.WrapError(err, "FailedCount")
		return
@@ -140,25 +523,40 @@ func (z *BucketReplicationStats) EncodeMsg(en *msgp.Writer) (err error) {
// MarshalMsg implements msgp.Marshaler
func (z *BucketReplicationStats) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
-	// map header, size 6
+	// map header, size 7
	// string "Stats"
	o = append(o, 0x87, 0xa5, 0x53, 0x74, 0x61, 0x74, 0x73)
	o = msgp.AppendMapHeader(o, uint32(len(z.Stats)))
	for za0001, za0002 := range z.Stats {
		o = msgp.AppendString(o, za0001)
		if za0002 == nil {
			o = msgp.AppendNil(o)
		} else {
			o, err = za0002.MarshalMsg(o)
			if err != nil {
				err = msgp.WrapError(err, "Stats", za0001)
				return
			}
		}
	}
	// string "PendingSize"
-	o = append(o, 0x86, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
-	o = msgp.AppendUint64(o, z.PendingSize)
+	o = append(o, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
+	o = msgp.AppendInt64(o, z.PendingSize)
	// string "ReplicatedSize"
	o = append(o, 0xae, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
-	o = msgp.AppendUint64(o, z.ReplicatedSize)
+	o = msgp.AppendInt64(o, z.ReplicatedSize)
	// string "ReplicaSize"
	o = append(o, 0xab, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x69, 0x7a, 0x65)
-	o = msgp.AppendUint64(o, z.ReplicaSize)
+	o = msgp.AppendInt64(o, z.ReplicaSize)
	// string "FailedSize"
	o = append(o, 0xaa, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
-	o = msgp.AppendUint64(o, z.FailedSize)
+	o = msgp.AppendInt64(o, z.FailedSize)
	// string "PendingCount"
	o = append(o, 0xac, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x75, 0x6e, 0x74)
-	o = msgp.AppendUint64(o, z.PendingCount)
+	o = msgp.AppendInt64(o, z.PendingCount)
	// string "FailedCount"
	o = append(o, 0xab, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74)
-	o = msgp.AppendUint64(o, z.FailedCount)
+	o = msgp.AppendInt64(o, z.FailedCount)
	return
}

@@ -180,38 +578,79 @@ func (z *BucketReplicationStats) UnmarshalMsg(bts []byte) (o []byte, err error)
		return
	}
	switch msgp.UnsafeString(field) {
	case "Stats":
		var zb0002 uint32
		zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
		if err != nil {
			err = msgp.WrapError(err, "Stats")
			return
		}
		if z.Stats == nil {
			z.Stats = make(map[string]*BucketReplicationStat, zb0002)
		} else if len(z.Stats) > 0 {
			for key := range z.Stats {
				delete(z.Stats, key)
			}
		}
		for zb0002 > 0 {
			var za0001 string
			var za0002 *BucketReplicationStat
			zb0002--
			za0001, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Stats")
				return
			}
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				if err != nil {
					return
				}
				za0002 = nil
			} else {
				if za0002 == nil {
					za0002 = new(BucketReplicationStat)
				}
				bts, err = za0002.UnmarshalMsg(bts)
				if err != nil {
					err = msgp.WrapError(err, "Stats", za0001)
					return
				}
			}
			z.Stats[za0001] = za0002
		}
	case "PendingSize":
-		z.PendingSize, bts, err = msgp.ReadUint64Bytes(bts)
+		z.PendingSize, bts, err = msgp.ReadInt64Bytes(bts)
		if err != nil {
			err = msgp.WrapError(err, "PendingSize")
			return
		}
	case "ReplicatedSize":
-		z.ReplicatedSize, bts, err = msgp.ReadUint64Bytes(bts)
+		z.ReplicatedSize, bts, err = msgp.ReadInt64Bytes(bts)
		if err != nil {
			err = msgp.WrapError(err, "ReplicatedSize")
			return
		}
	case "ReplicaSize":
-		z.ReplicaSize, bts, err = msgp.ReadUint64Bytes(bts)
+		z.ReplicaSize, bts, err = msgp.ReadInt64Bytes(bts)
		if err != nil {
			err = msgp.WrapError(err, "ReplicaSize")
			return
		}
	case "FailedSize":
-		z.FailedSize, bts, err = msgp.ReadUint64Bytes(bts)
+		z.FailedSize, bts, err = msgp.ReadInt64Bytes(bts)
		if err != nil {
			err = msgp.WrapError(err, "FailedSize")
			return
		}
	case "PendingCount":
-		z.PendingCount, bts, err = msgp.ReadUint64Bytes(bts)
+		z.PendingCount, bts, err = msgp.ReadInt64Bytes(bts)
		if err != nil {
			err = msgp.WrapError(err, "PendingCount")
			return
		}
	case "FailedCount":
-		z.FailedCount, bts, err = msgp.ReadUint64Bytes(bts)
+		z.FailedCount, bts, err = msgp.ReadInt64Bytes(bts)
		if err != nil {
			err = msgp.WrapError(err, "FailedCount")
			return
@@ -230,7 +669,19 @@ func (z *BucketReplicationStats) UnmarshalMsg(bts []byte) (o []byte, err error)

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketReplicationStats) Msgsize() (s int) {
-	s = 1 + 12 + msgp.Uint64Size + 15 + msgp.Uint64Size + 12 + msgp.Uint64Size + 11 + msgp.Uint64Size + 13 + msgp.Uint64Size + 12 + msgp.Uint64Size
+	s = 1 + 6 + msgp.MapHeaderSize
+	if z.Stats != nil {
+		for za0001, za0002 := range z.Stats {
+			_ = za0002
+			s += msgp.StringPrefixSize + len(za0001)
+			if za0002 == nil {
+				s += msgp.NilSize
+			} else {
+				s += za0002.Msgsize()
+			}
+		}
+	}
+	s += 12 + msgp.Int64Size + 15 + msgp.Int64Size + 12 + msgp.Int64Size + 11 + msgp.Int64Size + 13 + msgp.Int64Size + 12 + msgp.Int64Size
	return
}

@@ -340,3 +791,110 @@ func (z *BucketStats) Msgsize() (s int) {
	s = 1 + 17 + z.ReplicationStats.Msgsize()
	return
}

// DecodeMsg implements msgp.Decodable
func (z *ReplicationLatency) DecodeMsg(dc *msgp.Reader) (err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, err = dc.ReadMapHeader()
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, err = dc.ReadMapKeyPtr()
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "UploadHistogram":
			err = z.UploadHistogram.DecodeMsg(dc)
			if err != nil {
				err = msgp.WrapError(err, "UploadHistogram")
				return
			}
		default:
			err = dc.Skip()
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	return
}

// EncodeMsg implements msgp.Encodable
func (z *ReplicationLatency) EncodeMsg(en *msgp.Writer) (err error) {
	// map header, size 1
	// write "UploadHistogram"
	err = en.Append(0x81, 0xaf, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d)
	if err != nil {
		return
	}
	err = z.UploadHistogram.EncodeMsg(en)
	if err != nil {
		err = msgp.WrapError(err, "UploadHistogram")
		return
	}
	return
}

// MarshalMsg implements msgp.Marshaler
func (z *ReplicationLatency) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 1
	// string "UploadHistogram"
	o = append(o, 0x81, 0xaf, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d)
	o, err = z.UploadHistogram.MarshalMsg(o)
	if err != nil {
		err = msgp.WrapError(err, "UploadHistogram")
		return
	}
	return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *ReplicationLatency) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "UploadHistogram":
			bts, err = z.UploadHistogram.UnmarshalMsg(bts)
			if err != nil {
				err = msgp.WrapError(err, "UploadHistogram")
				return
			}
		default:
			bts, err = msgp.Skip(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	o = bts
	return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ReplicationLatency) Msgsize() (s int) {
	s = 1 + 16 + z.UploadHistogram.Msgsize()
	return
}

@@ -9,6 +9,119 @@ import (
	"github.com/tinylib/msgp/msgp"
)

func TestMarshalUnmarshalBucketReplicationStat(t *testing.T) {
	v := BucketReplicationStat{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgBucketReplicationStat(b *testing.B) {
	v := BucketReplicationStat{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgBucketReplicationStat(b *testing.B) {
	v := BucketReplicationStat{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalBucketReplicationStat(b *testing.B) {
	v := BucketReplicationStat{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeBucketReplicationStat(t *testing.T) {
	v := BucketReplicationStat{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeBucketReplicationStat Msgsize() is inaccurate")
	}

	vn := BucketReplicationStat{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeBucketReplicationStat(b *testing.B) {
	v := BucketReplicationStat{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeBucketReplicationStat(b *testing.B) {
	v := BucketReplicationStat{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshalBucketReplicationStats(t *testing.T) {
	v := BucketReplicationStats{}
	bts, err := v.MarshalMsg(nil)
@@ -234,3 +347,116 @@ func BenchmarkDecodeBucketStats(b *testing.B) {
		}
	}
}

func TestMarshalUnmarshalReplicationLatency(t *testing.T) {
	v := ReplicationLatency{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgReplicationLatency(b *testing.B) {
	v := ReplicationLatency{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgReplicationLatency(b *testing.B) {
	v := ReplicationLatency{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalReplicationLatency(b *testing.B) {
	v := ReplicationLatency{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeReplicationLatency(t *testing.T) {
	v := ReplicationLatency{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeReplicationLatency Msgsize() is inaccurate")
	}

	vn := ReplicationLatency{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeReplicationLatency(b *testing.B) {
	v := ReplicationLatency{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeReplicationLatency(b *testing.B) {
	v := ReplicationLatency{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

@@ -19,8 +19,6 @@ package cmd

import (
	"context"
-	"crypto/sha256"
-	"encoding/hex"
	"net/http"
	"sync"
	"time"
@@ -30,6 +28,7 @@ import (
-	minio "github.com/minio/minio-go/v7"
+	miniogo "github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/minio/minio/internal/bucket/replication"
	"github.com/minio/minio/internal/bucket/versioning"
	"github.com/minio/minio/internal/crypto"
	"github.com/minio/minio/internal/kms"
@@ -37,7 +36,7 @@ import (
)

const (
-	defaultHealthCheckDuration = 100 * time.Second
+	defaultHealthCheckDuration = 30 * time.Second
)

// BucketTargetSys represents bucket targets subsystem
@@ -93,6 +92,9 @@ func (sys *BucketTargetSys) Delete(bucket string) {
		return
	}
	for _, t := range tgts {
		if tgt, ok := sys.arnRemotesMap[t.Arn]; ok && tgt.healthCancelFn != nil {
			tgt.healthCancelFn()
		}
		delete(sys.arnRemotesMap, t.Arn)
	}
	delete(sys.targetsMap, bucket)
@@ -136,24 +138,28 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
	defer sys.Unlock()

	tgts := sys.targetsMap[bucket]

	newtgts := make([]madmin.BucketTarget, len(tgts))
	found := false
	for idx, t := range tgts {
		if t.Type == tgt.Type {
-			if t.Arn == tgt.Arn && !update {
-				return BucketRemoteAlreadyExists{Bucket: t.TargetBucket}
+			if t.Arn == tgt.Arn {
+				if !update {
+					return BucketRemoteAlreadyExists{Bucket: t.TargetBucket}
+				}
+				newtgts[idx] = *tgt
+				found = true
+				continue
			}
-			newtgts[idx] = *tgt
-			found = true
-			continue
		}
		newtgts[idx] = t
	}
	if !found && !update {
		newtgts = append(newtgts, *tgt)
	}

	// cancel health check for previous target client to avoid leak.
	if prevClnt, ok := sys.arnRemotesMap[tgt.Arn]; ok && prevClnt.healthCancelFn != nil {
		prevClnt.healthCancelFn()
	}
	sys.targetsMap[bucket] = newtgts
	sys.arnRemotesMap[tgt.Arn] = clnt
	sys.updateBandwidthLimit(bucket, tgt.BandwidthLimit)
@@ -194,12 +200,16 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
	if arn.Type == madmin.ReplicationService {
		// reject removal of remote target if replication configuration is present
		rcfg, err := getReplicationConfig(ctx, bucket)
-		if err == nil && rcfg.RoleArn == arnStr {
-			sys.RLock()
-			_, ok := sys.arnRemotesMap[arnStr]
-			sys.RUnlock()
-			if ok {
-				return BucketRemoteRemoveDisallowed{Bucket: bucket}
+		if err == nil {
+			for _, tgtArn := range rcfg.FilterTargetArns(replication.ObjectOpts{}) {
+				if err == nil && (tgtArn == arnStr || rcfg.RoleArn == arnStr) {
+					sys.RLock()
+					_, ok := sys.arnRemotesMap[arnStr]
+					sys.RUnlock()
+					if ok {
+						return BucketRemoteRemoveDisallowed{Bucket: bucket}
+					}
+				}
			}
		}
	}
@@ -224,6 +234,9 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
		return BucketRemoteTargetNotFound{Bucket: bucket}
	}
	sys.targetsMap[bucket] = targets
	if tgt, ok := sys.arnRemotesMap[arnStr]; ok && tgt.healthCancelFn != nil {
		tgt.healthCancelFn()
	}
	delete(sys.arnRemotesMap, arnStr)
	sys.updateBandwidthLimit(bucket, 0)
	return nil
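
A recurring change in this file is cancelling a target's health-check context before dropping it from arnRemotesMap, so the per-target health checker goroutine does not leak. The shape of that pattern as a standalone sketch (the type and map here mirror, but are not, the cmd package internals; assumes a "context" import):

	type remoteClient struct {
		cancel context.CancelFunc // mirrors TargetClient.healthCancelFn
	}

	func dropTarget(clients map[string]*remoteClient, arn string) {
		if c, ok := clients[arn]; ok && c.cancel != nil {
			c.cancel() // stop the health-check goroutine before forgetting the client
		}
		delete(clients, arn)
	}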

@@ -259,22 +272,6 @@ func NewBucketTargetSys() *BucketTargetSys {
	}
}

-// Init initializes the bucket targets subsystem for buckets which have targets configured.
-func (sys *BucketTargetSys) Init(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
-	if objAPI == nil {
-		return errServerNotInitialized
-	}
-
-	// In gateway mode, bucket targets is not supported.
-	if globalIsGateway {
-		return nil
-	}
-
-	// Load bucket targets once during boot in background.
-	go sys.load(ctx, buckets, objAPI)
-	return nil
-}
-
// UpdateAllTargets updates target to reflect metadata updates
func (sys *BucketTargetSys) UpdateAllTargets(bucket string, tgts *madmin.BucketTargets) {
	if sys == nil {
@@ -286,6 +283,9 @@ func (sys *BucketTargetSys) UpdateAllTargets(bucket string, tgts *madmin.BucketT
	// remove target and arn association
	if tgts, ok := sys.targetsMap[bucket]; ok {
		for _, t := range tgts {
			if tgt, ok := sys.arnRemotesMap[t.Arn]; ok && tgt.healthCancelFn != nil {
				tgt.healthCancelFn()
			}
			delete(sys.arnRemotesMap, t.Arn)
		}
	}
@@ -309,35 +309,33 @@ func (sys *BucketTargetSys) UpdateAllTargets(bucket string, tgts *madmin.BucketT
}

// create minio-go clients for buckets having remote targets
-func (sys *BucketTargetSys) load(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) {
-	for _, bucket := range buckets {
-		cfg, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket.Name)
-		if err != nil {
-			logger.LogIf(ctx, err)
-			continue
-		}
-		if cfg == nil || cfg.Empty() {
-			continue
-		}
-		if len(cfg.Targets) > 0 {
-			sys.targetsMap[bucket.Name] = cfg.Targets
-		}
-		for _, tgt := range cfg.Targets {
-			tgtClient, err := sys.getRemoteTargetClient(&tgt)
-			if err != nil {
-				logger.LogIf(ctx, err)
-				continue
-			}
-			sys.arnRemotesMap[tgt.Arn] = tgtClient
-			sys.updateBandwidthLimit(bucket.Name, tgt.BandwidthLimit)
-		}
+func (sys *BucketTargetSys) set(bucket BucketInfo, meta BucketMetadata) {
+	cfg := meta.bucketTargetConfig
+	if cfg == nil || cfg.Empty() {
+		return
+	}
+	sys.Lock()
+	defer sys.Unlock()
+	if len(cfg.Targets) > 0 {
+		sys.targetsMap[bucket.Name] = cfg.Targets
+	}
+	for _, tgt := range cfg.Targets {
+		tgtClient, err := sys.getRemoteTargetClient(&tgt)
+		if err != nil {
+			logger.LogIf(GlobalContext, err)
+			continue
+		}
+		sys.arnRemotesMap[tgt.Arn] = tgtClient
+		sys.updateBandwidthLimit(bucket.Name, tgt.BandwidthLimit)
	}
+	sys.targetsMap[bucket.Name] = cfg.Targets
}

// getRemoteTargetInstanceTransport contains a singleton roundtripper.
-var getRemoteTargetInstanceTransport http.RoundTripper
-var getRemoteTargetInstanceTransportOnce sync.Once
+var (
+	getRemoteTargetInstanceTransport     http.RoundTripper
+	getRemoteTargetInstanceTransportOnce sync.Once
+)

// Returns a minio-go Client configured to access remote host described in replication target config.
func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*TargetClient, error) {
@@ -360,6 +358,10 @@ func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*T
	if tcfg.HealthCheckDuration >= 1 { // require minimum health check duration of 1 sec.
		hcDuration = tcfg.HealthCheckDuration
	}
	cancelFn, err := api.HealthCheck(hcDuration)
	if err != nil {
		return nil, err
	}
	tc := &TargetClient{
		Client:              api,
		healthCheckDuration: hcDuration,
@@ -367,6 +369,9 @@ func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*T
		Bucket:         tcfg.TargetBucket,
		StorageClass:   tcfg.StorageClass,
		disableProxy:   tcfg.DisableProxy,
		healthCancelFn: cancelFn,
		ARN:            tcfg.Arn,
		ResetID:        tcfg.ResetID,
	}
	return tc, nil
}

@@ -390,14 +395,9 @@ func (sys *BucketTargetSys) getRemoteARN(bucket string, target *madmin.BucketTar

// generate ARN that is unique to this target type
func generateARN(t *madmin.BucketTarget) string {
-	hash := sha256.New()
-	hash.Write([]byte(t.Type))
-	hash.Write([]byte(t.Region))
-	hash.Write([]byte(t.TargetBucket))
-	hashSum := hex.EncodeToString(hash.Sum(nil))
	arn := madmin.ARN{
		Type:   t.Type,
-		ID:     hashSum,
+		ID:     mustGetUUID(),
		Region: t.Region,
		Bucket: t.TargetBucket,
	}
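
This change swaps a content-derived ARN ID for a random UUID. Under the old scheme, two remote targets with the same type, region, and bucket always produced the same ARN ID, as the sketch below makes explicit (illustration only; deterministicID is a hypothetical helper reconstructing the removed logic, assuming crypto/sha256 and encoding/hex imports). With mustGetUUID(), each registration gets a distinct ARN instead.

	func deterministicID(t madmin.BucketTarget) string {
		h := sha256.New()
		h.Write([]byte(t.Type))
		h.Write([]byte(t.Region))
		h.Write([]byte(t.TargetBucket))
		return hex.EncodeToString(h.Sum(nil)) // identical inputs always yield identical IDs
	}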

@@ -416,7 +416,7 @@ func parseBucketTargetConfig(bucket string, cdata, cmetadata []byte) (*madmin.Bu
		return nil, nil
	}
	data = cdata
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
	if len(cmetadata) != 0 {
		if err := json.Unmarshal(cmetadata, &meta); err != nil {
			return nil, err
@@ -445,4 +445,7 @@ type TargetClient struct {
	replicateSync bool
	StorageClass  string // storage class on remote
	disableProxy  bool
	healthCancelFn context.CancelFunc // cancellation function for client healthcheck
	ARN            string             // ARN to uniquely identify remote target
	ResetID        string
}

@@ -63,6 +63,15 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
		return
	}

	if globalSiteReplicationSys.isEnabled() {
		writeErrorResponse(ctx, w, APIError{
			Code:           "InvalidBucketState",
			Description:    "Cluster replication is enabled for this site, so the versioning state cannot be changed.",
			HTTPStatusCode: http.StatusConflict,
		}, r.URL)
		return
	}

	if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled && v.Suspended() {
		writeErrorResponse(ctx, w, APIError{
			Code: "InvalidBucketState",
@@ -86,7 +95,7 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
		return
	}

-	if err = globalBucketMetadataSys.Update(bucket, bucketVersioningConfig, configData); err != nil {
+	if err = globalBucketMetadataSys.Update(ctx, bucket, bucketVersioningConfig, configData); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
@@ -135,5 +144,4 @@ func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r

	// Write bucket versioning configuration to client
	writeSuccessResponseXML(w, configData)
-
}

@@ -18,12 +18,16 @@
package cmd

import (
	"bufio"
	"bytes"
	"context"
	"crypto/tls"
	"crypto/x509"
	"encoding/gob"
	"encoding/pem"
	"errors"
	"fmt"
	"io/ioutil"
	"math/rand"
	"net"
	"net/http"
@@ -34,16 +38,20 @@ import (
	"sort"
	"strconv"
	"strings"
	"syscall"
	"time"

	"github.com/dustin/go-humanize"
	fcolor "github.com/fatih/color"
	"github.com/go-openapi/loads"
	"github.com/inconshreveable/mousetrap"
	dns2 "github.com/miekg/dns"
	"github.com/minio/cli"
	consoleCerts "github.com/minio/console/pkg/certs"
	"github.com/minio/console/restapi"
	"github.com/minio/console/restapi/operations"
	"github.com/minio/kes"
	"github.com/minio/madmin-go"
	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/minio/minio-go/v7/pkg/set"
@@ -51,7 +59,6 @@ import (
	"github.com/minio/minio/internal/color"
	"github.com/minio/minio/internal/config"
	"github.com/minio/minio/internal/handlers"
-	xhttp "github.com/minio/minio/internal/http"
	"github.com/minio/minio/internal/kms"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/pkg/certs"
@@ -59,29 +66,66 @@ import (
	"github.com/minio/pkg/ellipses"
	"github.com/minio/pkg/env"
	xnet "github.com/minio/pkg/net"
	"github.com/rs/dnscache"
)

// serverDebugLog will enable debug printing
var serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn

-var defaultAWSCredProvider []credentials.Provider
+var (
+	shardDiskTimeDelta     time.Duration
+	defaultAWSCredProvider []credentials.Provider
+)

func init() {
	if runtime.GOOS == "windows" {
		if mousetrap.StartedByExplorer() {
			fmt.Printf("Don't double-click %s\n", os.Args[0])
			fmt.Println("You need to open cmd.exe/PowerShell and run it from the command line")
			fmt.Println("Refer to the docs here on how to run it as a Windows Service https://github.com/minio/minio-service/tree/master/windows")
			fmt.Println("Press the Enter Key to Exit")
			fmt.Scanln()
			os.Exit(1)
		}
	}

	rand.Seed(time.Now().UTC().UnixNano())

	logger.Init(GOPATH, GOROOT)
	logger.RegisterError(config.FmtError)

-	if IsKubernetes() || IsDocker() || IsBOSH() || IsDCOS() || IsPCFTile() {
-		// 30 seconds matches the orchestrator DNS TTLs, have
-		// a 5 second timeout to lookup from DNS servers.
-		globalDNSCache = xhttp.NewDNSCache(30*time.Second, 5*time.Second, logger.LogOnceIf)
-	} else {
-		// On bare-metals DNS do not change often, so it is
-		// safe to assume a higher timeout upto 10 minutes.
-		globalDNSCache = xhttp.NewDNSCache(10*time.Minute, 5*time.Second, logger.LogOnceIf)
-	}
	initGlobalContext()

	options := dnscache.ResolverRefreshOptions{
		ClearUnused:      true,
		PersistOnFailure: false,
	}

	globalIsCICD = env.Get("MINIO_CI_CD", "") != "" || env.Get("CI", "") != ""

	containers := IsKubernetes() || IsDocker() || IsBOSH() || IsDCOS() || IsPCFTile()

	// Call to refresh will refresh names in cache. If you pass true, it will also
	// remove cached names not looked up since the last call to Refresh. It is a good idea
	// to call this method on a regular interval.
	go func() {
		var t *time.Ticker
		if containers {
			t = time.NewTicker(1 * time.Minute)
		} else {
			t = time.NewTicker(10 * time.Minute)
		}
		defer t.Stop()
		for {
			select {
			case <-t.C:
				globalDNSCache.RefreshWithOptions(options)
			case <-GlobalContext.Done():
				return
			}
		}
	}()

	globalForwarder = handlers.NewForwarder(&handlers.Forwarder{
		PassHost:     true,
		RoundTripper: newGatewayHTTPTransport(1 * time.Hour),
@@ -92,11 +136,11 @@ func init() {
		},
	})

	globalTransitionState = newTransitionState()

	console.SetColor("Debug", fcolor.New())

	gob.Register(StorageErr(""))
	gob.Register(madmin.TimeInfo{})
	gob.Register(map[string]interface{}{})

	defaultAWSCredProvider = []credentials.Provider{
		&credentials.IAM{
@@ -106,6 +150,12 @@ func init() {
		},
	}

	var err error
	shardDiskTimeDelta, err = time.ParseDuration(env.Get("_MINIO_SHARD_DISKTIME_DELTA", "1m"))
	if err != nil {
		shardDiskTimeDelta = 1 * time.Minute
	}

	// All minio-go API operations shall be performed only once,
	// another way to look at this is we are turning off retries.
	minio.MaxRetry = 1
@@ -140,20 +190,37 @@ func minioConfigToConsoleFeatures() {
	}
	// if IDP is enabled, set IDP environment variables
	if globalOpenIDConfig.URL != nil {
-		os.Setenv("CONSOLE_IDP_URL", globalOpenIDConfig.DiscoveryDoc.Issuer)
+		os.Setenv("CONSOLE_IDP_URL", globalOpenIDConfig.URL.String())
		os.Setenv("CONSOLE_IDP_CLIENT_ID", globalOpenIDConfig.ClientID)
		os.Setenv("CONSOLE_IDP_SECRET", globalOpenIDConfig.ClientSecret)
		os.Setenv("CONSOLE_IDP_HMAC_SALT", globalDeploymentID)
		os.Setenv("CONSOLE_IDP_HMAC_PASSPHRASE", globalOpenIDConfig.ClientID)
		os.Setenv("CONSOLE_IDP_SCOPES", strings.Join(globalOpenIDConfig.DiscoveryDoc.ScopesSupported, ","))
		if globalOpenIDConfig.ClaimUserinfo {
			os.Setenv("CONSOLE_IDP_USERINFO", config.EnableOn)
		}
		if globalOpenIDConfig.RedirectURIDynamic {
			// Enable dynamic redirect-uri's based on incoming 'host' header,
			// Overrides any other callback URL.
			os.Setenv("CONSOLE_IDP_CALLBACK_DYNAMIC", config.EnableOn)
		}
		if globalOpenIDConfig.RedirectURI != "" {
			os.Setenv("CONSOLE_IDP_CALLBACK", globalOpenIDConfig.RedirectURI)
		} else {
			os.Setenv("CONSOLE_IDP_CALLBACK", getConsoleEndpoints()[0]+"/oauth_callback")
		}
	}
-	os.Setenv("CONSOLE_MINIO_REGION", globalServerRegion)
+	os.Setenv("CONSOLE_MINIO_REGION", globalSite.Region)
	os.Setenv("CONSOLE_CERT_PASSWD", env.Get("MINIO_CERT_PASSWD", ""))
	if globalSubnetConfig.License != "" {
		os.Setenv("CONSOLE_SUBNET_LICENSE", globalSubnetConfig.License)
	}
	if globalSubnetConfig.APIKey != "" {
		os.Setenv("CONSOLE_SUBNET_API_KEY", globalSubnetConfig.APIKey)
	}
	if globalSubnetConfig.Proxy != "" {
		os.Setenv("CONSOLE_SUBNET_PROXY", globalSubnetConfig.Proxy)
	}
}

func initConsoleServer() (*restapi.Server, error) {
@@ -178,16 +245,16 @@ func initConsoleServer() (*restapi.Server, error) {
		return nil, err
	}

-	noLog := func(string, ...interface{}) {
-		// nothing to log
-	}
-
-	// Initialize MinIO loggers
-	restapi.LogInfo = noLog
-	restapi.LogError = noLog
-
	api := operations.NewConsoleAPI(swaggerSpec)
-	api.Logger = noLog

	if !serverDebugLog {
		// Disable console logging if server debug log is not enabled
		noLog := func(string, ...interface{}) {}

		restapi.LogInfo = noLog
		restapi.LogError = noLog
		api.Logger = noLog
	}

	server := restapi.NewServer(api)
	// register all APIs
@@ -215,11 +282,6 @@ func initConsoleServer() (*restapi.Server, error) {
}

func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
-	if (GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
-		logger.Fatal(errInvalidArgument,
-			"Encryption support is requested but '%s' does not support encryption", name)
-	}
-
	if strings.HasPrefix(name, "gateway") {
		if GlobalGatewaySSE.IsSet() && GlobalKMS == nil {
			uiErr := config.ErrInvalidGWSSEEnvValue(nil).Msg("MINIO_GATEWAY_SSE set but KMS is not configured")
@@ -270,7 +332,7 @@ func checkUpdate(mode string) {
		return
	}

-	logStartupMessage(prepareUpdateMessage("Run `mc admin update`", lrTime.Sub(crTime)))
+	logger.Info(prepareUpdateMessage("Run `mc admin update`", lrTime.Sub(crTime)))
}

func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() string) (*ConfigDir, bool) {
@@ -315,7 +377,6 @@ func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() s
}

func handleCommonCmdArgs(ctx *cli.Context) {
-
	// Get "json" flag from command line argument and
	// enable json and quiet modes if json flag is turned on.
	globalCLIContext.JSON = ctx.IsSet("json") || ctx.GlobalIsSet("json")
@@ -396,7 +457,166 @@ func handleCommonCmdArgs(ctx *cli.Context) {
	logger.FatalIf(mkdirAllIgnorePerm(globalCertsCADir.Get()), "Unable to create certs CA directory at %s", globalCertsCADir.Get())
}

type envKV struct {
	Key   string
	Value string
	Skip  bool
}

func (e envKV) String() string {
	if e.Skip {
		return ""
	}
	return fmt.Sprintf("%s=%s", e.Key, e.Value)
}

func parsEnvEntry(envEntry string) (envKV, error) {
	envEntry = strings.TrimSpace(envEntry)
	if envEntry == "" {
		// Skip all empty lines
		return envKV{
			Skip: true,
		}, nil
	}
	const envSeparator = "="
	envTokens := strings.SplitN(strings.TrimSpace(strings.TrimPrefix(envEntry, "export")), envSeparator, 2)
	if len(envTokens) != 2 {
		return envKV{}, fmt.Errorf("envEntry malformed; %s, expected to be of form 'KEY=value'", envEntry)
	}

	key := envTokens[0]
	val := envTokens[1]

	// Remove quotes from the value if found
	if len(val) >= 2 {
		quote := val[0]
		if (quote == '"' || quote == '\'') && val[len(val)-1] == quote {
			val = val[1 : len(val)-1]
		}
	}

	return envKV{
		Key:   key,
		Value: val,
	}, nil
}
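
parsEnvEntry accepts shell-flavored lines: an optional "export" prefix is stripped, matching single or double quotes around the value are removed, and blank lines come back with Skip set. A usage sketch with hypothetical inputs (values are illustration only):

	func exampleParse() {
		for _, line := range []string{
			`export MINIO_ROOT_USER="admin"`, // -> MINIO_ROOT_USER=admin
			`MINIO_VOLUMES='/mnt/data'`,      // -> MINIO_VOLUMES=/mnt/data
			``,                               // -> Skip: true
		} {
			ekv, err := parsEnvEntry(line)
			if err != nil || ekv.Skip {
				continue
			}
			fmt.Println(ekv.String())
		}
	}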

// Similar to os.Environ, returns the environment values read
// from a file as structured "KEY=value" entries.
func minioEnvironFromFile(envConfigFile string) ([]envKV, error) {
	f, err := os.Open(envConfigFile)
	if err != nil {
		if os.IsNotExist(err) { // ignore if file doesn't exist.
			return nil, nil
		}
		return nil, err
	}
	defer f.Close()
	var ekvs []envKV
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		ekv, err := parsEnvEntry(scanner.Text())
		if err != nil {
			return nil, err
		}
		if ekv.Skip {
			// Skips empty lines
			continue
		}
		ekvs = append(ekvs, ekv)
	}
	if err = scanner.Err(); err != nil {
		return nil, err
	}
	return ekvs, nil
}
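
A short usage sketch (the file path is a made-up assumption, not one the patch mandates): each parsed entry is exported into the process environment, which is the same application step loadEnvVarsFromFiles performs below for MINIO_CONFIG_ENV_FILE.

	func exampleLoadEnvFile() {
		ekvs, err := minioEnvironFromFile("/etc/default/minio") // hypothetical path
		if err != nil {
			log.Fatal(err)
		}
		for _, ekv := range ekvs {
			os.Setenv(ekv.Key, ekv.Value)
		}
	}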

func readFromSecret(sp string) (string, error) {
	// Supports reading path from docker secrets, filename is
	// relative to /run/secrets/ position.
	if isFile(pathJoin("/run/secrets/", sp)) {
		sp = pathJoin("/run/secrets/", sp)
	}
	credBuf, err := ioutil.ReadFile(sp)
	if err != nil {
		if os.IsNotExist(err) { // ignore if file doesn't exist.
			return "", nil
		}
		return "", err
	}
	return string(bytes.TrimSpace(credBuf)), nil
}
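
readFromSecret takes either an absolute path or a bare name resolved under /run/secrets/, and returns an empty string (not an error) when the file is missing. A sketch with a hypothetical Docker secret name:

	func exampleSecret() {
		// "minio_root_password" is a made-up secret name; it resolves to
		// /run/secrets/minio_root_password when such a file exists.
		pw, err := readFromSecret("minio_root_password")
		if err == nil && pw != "" {
			os.Setenv(config.EnvRootPassword, pw) // same flow loadEnvVarsFromFiles uses
		}
	}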

func loadEnvVarsFromFiles() {
	if env.IsSet(config.EnvAccessKeyFile) {
		accessKey, err := readFromSecret(env.Get(config.EnvAccessKeyFile, ""))
		if err != nil {
			logger.Fatal(config.ErrInvalidCredentials(err),
				"Unable to validate credentials inherited from the secret file(s)")
		}
		if accessKey != "" {
			os.Setenv(config.EnvRootUser, accessKey)
		}
	}

	if env.IsSet(config.EnvSecretKeyFile) {
		secretKey, err := readFromSecret(env.Get(config.EnvSecretKeyFile, ""))
		if err != nil {
			logger.Fatal(config.ErrInvalidCredentials(err),
				"Unable to validate credentials inherited from the secret file(s)")
		}
		if secretKey != "" {
			os.Setenv(config.EnvRootPassword, secretKey)
		}
	}

	if env.IsSet(config.EnvRootUserFile) {
		rootUser, err := readFromSecret(env.Get(config.EnvRootUserFile, ""))
		if err != nil {
			logger.Fatal(config.ErrInvalidCredentials(err),
				"Unable to validate credentials inherited from the secret file(s)")
		}
		if rootUser != "" {
			os.Setenv(config.EnvRootUser, rootUser)
		}
	}

	if env.IsSet(config.EnvRootPasswordFile) {
		rootPassword, err := readFromSecret(env.Get(config.EnvRootPasswordFile, ""))
		if err != nil {
			logger.Fatal(config.ErrInvalidCredentials(err),
				"Unable to validate credentials inherited from the secret file(s)")
		}
		if rootPassword != "" {
			os.Setenv(config.EnvRootPassword, rootPassword)
		}
	}

	if env.IsSet(config.EnvKMSSecretKeyFile) {
		kmsSecret, err := readFromSecret(env.Get(config.EnvKMSSecretKeyFile, ""))
		if err != nil {
			logger.Fatal(err, "Unable to read the KMS secret key inherited from secret file")
		}
		if kmsSecret != "" {
			os.Setenv(config.EnvKMSSecretKey, kmsSecret)
		}
	}

	if env.IsSet(config.EnvConfigEnvFile) {
		ekvs, err := minioEnvironFromFile(env.Get(config.EnvConfigEnvFile, ""))
		if err != nil {
			logger.Fatal(err, "Unable to read the config environment file")
		}
		for _, ekv := range ekvs {
			os.Setenv(ekv.Key, ekv.Value)
		}
	}
}
|
||||
|
||||
func handleCommonEnvVars() {
|
||||
loadEnvVarsFromFiles()
|
||||
|
||||
var err error
|
||||
globalBrowserEnabled, err = config.ParseBool(env.Get(config.EnvBrowser, config.EnableOn))
|
||||
if err != nil {
|
||||
@@ -441,6 +661,14 @@ func handleCommonEnvVars() {
|
||||
logger.Fatal(config.ErrInvalidFSOSyncValue(err), "Invalid MINIO_FS_OSYNC value in environment variable")
|
||||
}
|
||||
|
||||
if rootDiskSize := env.Get(config.EnvRootDiskThresholdSize, ""); rootDiskSize != "" {
|
||||
size, err := humanize.ParseBytes(rootDiskSize)
|
||||
if err != nil {
|
||||
logger.Fatal(err, fmt.Sprintf("Invalid %s value in environment variable", config.EnvRootDiskThresholdSize))
|
||||
}
|
||||
globalRootDiskThreshold = size
|
||||
}
|
||||
|
||||
domains := env.Get(config.EnvDomain, "")
|
||||
if len(domains) != 0 {
|
||||
for _, domainName := range strings.Split(domains, config.ValueSeparator) {
|
||||
@@ -463,11 +691,11 @@ func handleCommonEnvVars() {
	publicIPs := env.Get(config.EnvPublicIPs, "")
	if len(publicIPs) != 0 {
		minioEndpoints := strings.Split(publicIPs, config.ValueSeparator)
-		var domainIPs = set.NewStringSet()
+		domainIPs := set.NewStringSet()
		for _, endpoint := range minioEndpoints {
			if net.ParseIP(endpoint) == nil {
				// Checking if the IP is a DNS entry.
-				addrs, err := net.LookupHost(endpoint)
+				addrs, err := globalDNSCache.LookupHost(GlobalContext, endpoint)
				if err != nil {
					logger.FatalIf(err, "Unable to initialize MinIO server with [%s] invalid entry found in MINIO_PUBLIC_IPS", endpoint)
				}
@@ -493,11 +721,12 @@ func handleCommonEnvVars() {
	// in-place update is off.
	globalInplaceUpdateDisabled = strings.EqualFold(env.Get(config.EnvUpdate, config.EnableOn), config.EnableOff)

-	// Check if the supported credential env vars, "MINIO_ROOT_USER" and
-	// "MINIO_ROOT_PASSWORD" are provided
+	// Check if the supported credential env vars,
+	// "MINIO_ROOT_USER" and "MINIO_ROOT_PASSWORD" are provided
	// Warn user if deprecated environment variables,
	// "MINIO_ACCESS_KEY" and "MINIO_SECRET_KEY", are defined
	// Check all error conditions first
+	//nolint:gocritic
	if !env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) {
		logger.Fatal(config.ErrMissingEnvCredentialRootUser(nil), "Unable to start MinIO")
	} else if env.IsSet(config.EnvRootUser) && !env.IsSet(config.EnvRootPassword) {
@@ -514,29 +743,29 @@ func handleCommonEnvVars() {
	// are defined or both are not defined.
	// Check both cases and authenticate them if correctly defined
	var user, password string
-	haveRootCredentials := false
-	haveAccessCredentials := false
+	var hasCredentials bool
+	//nolint:gocritic
	if env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) {
		user = env.Get(config.EnvRootUser, "")
		password = env.Get(config.EnvRootPassword, "")
-		haveRootCredentials = true
+		hasCredentials = true
	} else if env.IsSet(config.EnvAccessKey) && env.IsSet(config.EnvSecretKey) {
		user = env.Get(config.EnvAccessKey, "")
		password = env.Get(config.EnvSecretKey, "")
-		haveAccessCredentials = true
+		hasCredentials = true
	}
-	if haveRootCredentials || haveAccessCredentials {
+	if hasCredentials {
		cred, err := auth.CreateCredentials(user, password)
		if err != nil {
			logger.Fatal(config.ErrInvalidCredentials(err),
				"Unable to validate credentials inherited from the shell environment")
		}
-		if haveAccessCredentials {
+		if env.IsSet(config.EnvAccessKey) && env.IsSet(config.EnvSecretKey) {
			msg := fmt.Sprintf("WARNING: %s and %s are deprecated.\n"+
				" Please use %s and %s",
				config.EnvAccessKey, config.EnvSecretKey,
				config.EnvRootUser, config.EnvRootPassword)
-			logStartupMessage(color.RedBold(msg))
+			logger.Info(color.RedBold(msg))
		}
		globalActiveCred = cred
	}
@@ -570,7 +799,29 @@ func handleCommonEnvVars() {
				endpoints = append(endpoints, strings.Join(lbls, ""))
			}
		}
-		certificate, err := tls.LoadX509KeyPair(env.Get(config.EnvKESClientCert, ""), env.Get(config.EnvKESClientKey, ""))
+		// Manually load the certificate and private key into memory.
+		// We need to check whether the private key is encrypted, and
+		// if so, decrypt it using the user-provided password.
+		certBytes, err := os.ReadFile(env.Get(config.EnvKESClientCert, ""))
		if err != nil {
			logger.Fatal(err, "Unable to load KES client certificate as specified by the shell environment")
		}
+		keyBytes, err := os.ReadFile(env.Get(config.EnvKESClientKey, ""))
+		if err != nil {
+			logger.Fatal(err, "Unable to load KES client private key as specified by the shell environment")
+		}
+		privateKeyPEM, rest := pem.Decode(bytes.TrimSpace(keyBytes))
+		if len(rest) != 0 {
+			logger.Fatal(errors.New("private key contains additional data"), "Unable to load KES client private key as specified by the shell environment")
+		}
+		if x509.IsEncryptedPEMBlock(privateKeyPEM) {
+			keyBytes, err = x509.DecryptPEMBlock(privateKeyPEM, []byte(env.Get(config.EnvKESClientPassword, "")))
+			if err != nil {
+				logger.Fatal(err, "Unable to decrypt KES client private key as specified by the shell environment")
+			}
+			keyBytes = pem.EncodeToMemory(&pem.Block{Type: privateKeyPEM.Type, Bytes: keyBytes})
+		}
+		certificate, err := tls.X509KeyPair(certBytes, keyBytes)
+		if err != nil {
+			logger.Fatal(err, "Unable to load KES client certificate as specified by the shell environment")
+		}
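Worth noting: `x509.IsEncryptedPEMBlock` and `x509.DecryptPEMBlock` are deprecated in the standard library because legacy RFC 1423 PEM encryption is insecure by modern standards; the change uses them anyway to stay compatible with password-protected client keys. A standalone sketch of the same load-then-maybe-decrypt flow, with hypothetical file names and password source:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"encoding/pem"
	"log"
	"os"
)

// loadKeyPair reads a client certificate and key, decrypting the key
// first if it is a legacy password-protected PEM block.
func loadKeyPair(certPath, keyPath, password string) (tls.Certificate, error) {
	certPEM, err := os.ReadFile(certPath)
	if err != nil {
		return tls.Certificate{}, err
	}
	keyPEM, err := os.ReadFile(keyPath)
	if err != nil {
		return tls.Certificate{}, err
	}
	if block, _ := pem.Decode(keyPEM); block != nil && x509.IsEncryptedPEMBlock(block) {
		der, err := x509.DecryptPEMBlock(block, []byte(password))
		if err != nil {
			return tls.Certificate{}, err
		}
		keyPEM = pem.EncodeToMemory(&pem.Block{Type: block.Type, Bytes: der})
	}
	return tls.X509KeyPair(certPEM, keyPEM)
}

func main() {
	cert, err := loadKeyPair("client.crt", "client.key", os.Getenv("CLIENT_KEY_PASSWORD"))
	if err != nil {
		log.Fatal(err)
	}
	_ = cert // hand this to a tls.Config in real use
}
```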
@@ -579,7 +830,7 @@ func handleCommonEnvVars() {
			logger.Fatal(err, fmt.Sprintf("Unable to load X.509 root CAs for KES from %q", env.Get(config.EnvKESServerCA, globalCertsCADir.Get())))
		}

-		var defaultKeyID = env.Get(config.EnvKESKeyName, "")
+		defaultKeyID := env.Get(config.EnvKESKeyName, "")
		KMS, err := kms.NewWithConfig(kms.Config{
			Endpoints:    endpoints,
			DefaultKeyID: defaultKeyID,
@@ -599,17 +850,6 @@ func handleCommonEnvVars() {
		}
		GlobalKMS = KMS
	}

	if tiers := env.Get("_MINIO_DEBUG_REMOTE_TIERS_IMMEDIATELY", ""); tiers != "" {
		globalDebugRemoteTiersImmediately = strings.Split(tiers, ",")
	}
}

func logStartupMessage(msg string) {
	if globalConsoleSys != nil {
		globalConsoleSys.Send(msg, string(logger.All))
	}
	logger.StartupMessage(msg)
}
@@ -687,6 +927,13 @@ func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) {
		}
	}
	secureConn = true

+	// Certs might be symlinks, reload them every 10 seconds.
+	manager.UpdateReloadDuration(10 * time.Second)

+	// syscall.SIGHUP to reload the certs.
+	manager.ReloadOnSignal(syscall.SIGHUP)

	return x509Certs, manager, secureConn, nil
}
@@ -699,3 +946,32 @@ func contextCanceled(ctx context.Context) bool {
		return false
	}
}

+// bgContext returns a context that can be used for async operations.
+// Cancellation/timeouts are removed, so parent cancellations/timeout will
+// not propagate from parent.
+// Context values are preserved.
+// This can be used for goroutines that live beyond the parent context.
+func bgContext(parent context.Context) context.Context {
+	return bgCtx{parent: parent}
+}

+type bgCtx struct {
+	parent context.Context
+}

+func (a bgCtx) Done() <-chan struct{} {
+	return nil
+}

+func (a bgCtx) Err() error {
+	return nil
+}

+func (a bgCtx) Deadline() (deadline time.Time, ok bool) {
+	return time.Time{}, false
+}

+func (a bgCtx) Value(key interface{}) interface{} {
+	return a.parent.Value(key)
+}
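`bgCtx` keeps `Value` lookups working while reporting no deadline and no cancellation, so work started with it outlives the request that spawned it. A small self-contained sketch of that behavior (the `detached` type mirrors `bgCtx`; since Go 1.21 the standard library offers `context.WithoutCancel` with the same semantics):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type ctxKey string

// detached mirrors bgCtx above: no deadline, never cancelled, values kept.
type detached struct{ parent context.Context }

func (d detached) Done() <-chan struct{}             { return nil }
func (d detached) Err() error                        { return nil }
func (d detached) Deadline() (time.Time, bool)       { return time.Time{}, false }
func (d detached) Value(key interface{}) interface{} { return d.parent.Value(key) }

func main() {
	parent, cancel := context.WithCancel(context.WithValue(context.Background(), ctxKey("reqID"), "1234"))
	bg := detached{parent: parent}

	cancel() // cancelling the parent...

	fmt.Println(parent.Err())              // context.Canceled
	fmt.Println(bg.Err())                  // <nil>: ...does not reach bg
	fmt.Println(bg.Value(ctxKey("reqID"))) // 1234: values still flow through
}
```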
cmd/common-main_test.go (new file, 161 lines)
@@ -0,0 +1,161 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"errors"
	"io/ioutil"
	"reflect"
	"testing"
)

func Test_readFromSecret(t *testing.T) {
	testCases := []struct {
		content       string
		expectedErr   bool
		expectedValue string
	}{
		{
			"value\n",
			false,
			"value",
		},
		{
			" \t\n Hello, Gophers \n\t\r\n",
			false,
			"Hello, Gophers",
		},
	}

	for _, testCase := range testCases {
		testCase := testCase
		t.Run("", func(t *testing.T) {
			tmpfile, err := ioutil.TempFile("", "testfile")
			if err != nil {
				t.Error(err)
			}
			tmpfile.WriteString(testCase.content)
			tmpfile.Sync()
			tmpfile.Close()

			value, err := readFromSecret(tmpfile.Name())
			if err != nil && !testCase.expectedErr {
				t.Error(err)
			}
			if err == nil && testCase.expectedErr {
				t.Error(errors.New("expected error, found success"))
			}
			if value != testCase.expectedValue {
				t.Errorf("Expected %s, got %s", testCase.expectedValue, value)
			}
		})
	}
}

func Test_minioEnvironFromFile(t *testing.T) {
	testCases := []struct {
		content      string
		expectedErr  bool
		expectedEkvs []envKV
	}{
		{
			`
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123`,
			false,
			[]envKV{
				{
					Key:   "MINIO_ROOT_USER",
					Value: "minio",
				},
				{
					Key:   "MINIO_ROOT_PASSWORD",
					Value: "minio123",
				},
			},
		},
		// Value with double quotes
		{
			`export MINIO_ROOT_USER="minio"`,
			false,
			[]envKV{
				{
					Key:   "MINIO_ROOT_USER",
					Value: "minio",
				},
			},
		},
		// Value with single quotes
		{
			`export MINIO_ROOT_USER='minio'`,
			false,
			[]envKV{
				{
					Key:   "MINIO_ROOT_USER",
					Value: "minio",
				},
			},
		},
		{
			`
MINIO_ROOT_USER=minio
MINIO_ROOT_PASSWORD=minio123`,
			false,
			[]envKV{
				{
					Key:   "MINIO_ROOT_USER",
					Value: "minio",
				},
				{
					Key:   "MINIO_ROOT_PASSWORD",
					Value: "minio123",
				},
			},
		},
		{
			`
export MINIO_ROOT_USERminio
export MINIO_ROOT_PASSWORD=minio123`,
			true,
			nil,
		},
	}
	for _, testCase := range testCases {
		testCase := testCase
		t.Run("", func(t *testing.T) {
			tmpfile, err := ioutil.TempFile("", "testfile")
			if err != nil {
				t.Error(err)
			}
			tmpfile.WriteString(testCase.content)
			tmpfile.Sync()
			tmpfile.Close()

			ekvs, err := minioEnvironFromFile(tmpfile.Name())
			if err != nil && !testCase.expectedErr {
				t.Error(err)
			}
			if err == nil && testCase.expectedErr {
				t.Error(errors.New("expected error, found success"))
			}
			if !reflect.DeepEqual(ekvs, testCase.expectedEkvs) {
				t.Errorf("expected %v, got %v", testCase.expectedEkvs, ekvs)
			}
		})
	}
}
@@ -29,27 +29,31 @@ import (

var errConfigNotFound = errors.New("config file not found")

-func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]byte, error) {
-	// Read entire content by setting size to -1
-	r, err := objAPI.GetObjectNInfo(ctx, minioMetaBucket, configFile, nil, http.Header{}, readLock, ObjectOptions{})
+func readConfigWithMetadata(ctx context.Context, store objectIO, configFile string) ([]byte, ObjectInfo, error) {
+	r, err := store.GetObjectNInfo(ctx, minioMetaBucket, configFile, nil, http.Header{}, readLock, ObjectOptions{})
	if err != nil {
		// Treat object not found as config not found.
		if isErrObjectNotFound(err) {
-			return nil, errConfigNotFound
+			return nil, ObjectInfo{}, errConfigNotFound
		}

-		return nil, err
+		return nil, ObjectInfo{}, err
	}
	defer r.Close()

	buf, err := ioutil.ReadAll(r)
	if err != nil {
-		return nil, err
+		return nil, ObjectInfo{}, err
	}
	if len(buf) == 0 {
-		return nil, errConfigNotFound
+		return nil, ObjectInfo{}, errConfigNotFound
	}
-	return buf, nil
+	return buf, r.ObjInfo, nil
}

+func readConfig(ctx context.Context, store objectIO, configFile string) ([]byte, error) {
+	buf, _, err := readConfigWithMetadata(ctx, store, configFile)
+	return buf, err
+}

type objectDeleter interface {
@@ -57,20 +61,22 @@ type objectDeleter interface {
}

func deleteConfig(ctx context.Context, objAPI objectDeleter, configFile string) error {
-	_, err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile, ObjectOptions{})
+	_, err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile, ObjectOptions{
+		DeletePrefix: true,
+	})
	if err != nil && isErrObjectNotFound(err) {
		return errConfigNotFound
	}
	return err
}

-func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
+func saveConfig(ctx context.Context, store objectIO, configFile string, data []byte) error {
	hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
	if err != nil {
		return err
	}

-	_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader), ObjectOptions{MaxParity: true})
+	_, err = store.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader), ObjectOptions{MaxParity: true})
	return err
}

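The behavioral change to watch in this hunk is `DeletePrefix: true`: deleting a config entry now removes everything stored under that key rather than a single object. A toy model of prefix deletion; the in-memory store is purely illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

// store is a toy key/value map standing in for the object layer.
type store map[string][]byte

// deletePrefix mimics ObjectOptions{DeletePrefix: true}: every key under
// the given prefix is removed, not just an exact match.
func (s store) deletePrefix(prefix string) {
	for k := range s {
		if strings.HasPrefix(k, prefix) {
			delete(s, k)
		}
	}
}

func main() {
	s := store{
		"config/iam/policy.json":        []byte("{}"),
		"config/iam/policy.json/part.1": []byte("{}"),
	}
	s.deletePrefix("config/iam/policy.json")
	fmt.Println(len(s)) // 0: both entries are gone
}
```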
@@ -34,35 +34,38 @@ import (
	"github.com/minio/minio/internal/config/heal"
	xldap "github.com/minio/minio/internal/config/identity/ldap"
	"github.com/minio/minio/internal/config/identity/openid"
	xtls "github.com/minio/minio/internal/config/identity/tls"
	"github.com/minio/minio/internal/config/notify"
	"github.com/minio/minio/internal/config/policy/opa"
	"github.com/minio/minio/internal/config/scanner"
	"github.com/minio/minio/internal/config/storageclass"
	"github.com/minio/minio/internal/config/subnet"
	"github.com/minio/minio/internal/crypto"
	xhttp "github.com/minio/minio/internal/http"
	"github.com/minio/minio/internal/kms"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/minio/internal/logger/target/http"
	"github.com/minio/minio/internal/logger/target/kafka"
	"github.com/minio/pkg/env"
)

func initHelp() {
-	var kvs = map[string]config.KVS{
+	kvs := map[string]config.KVS{
		config.EtcdSubSys:           etcd.DefaultKVS,
		config.CacheSubSys:          cache.DefaultKVS,
		config.CompressionSubSys:    compress.DefaultKVS,
		config.IdentityLDAPSubSys:   xldap.DefaultKVS,
		config.IdentityOpenIDSubSys: openid.DefaultKVS,
		config.IdentityTLSSubSys:    xtls.DefaultKVS,
		config.PolicyOPASubSys:      opa.DefaultKVS,
		config.SiteSubSys:           config.DefaultSiteKVS,
		config.RegionSubSys:         config.DefaultRegionKVS,
		config.APISubSys:            api.DefaultKVS,
		config.CredentialsSubSys:    config.DefaultCredentialKVS,
-		config.LoggerWebhookSubSys:  logger.DefaultKVS,
+		config.LoggerWebhookSubSys:  logger.DefaultLoggerWebhookKVS,
		config.AuditWebhookSubSys:   logger.DefaultAuditWebhookKVS,
		config.AuditKafkaSubSys:     logger.DefaultAuditKafkaKVS,
		config.HealSubSys:           heal.DefaultKVS,
		config.ScannerSubSys:        scanner.DefaultKVS,
		config.SubnetSubSys:         subnet.DefaultKVS,
	}
	for k, v := range notify.DefaultNotificationKVS {
		kvs[k] = v
@@ -73,10 +76,10 @@ func initHelp() {
	config.RegisterDefaultKVS(kvs)

	// Captures help for each sub-system
-	var helpSubSys = config.HelpKVS{
+	helpSubSys := config.HelpKVS{
		config.HelpKV{
-			Key:         config.RegionSubSys,
-			Description: "label the location of the server",
+			Key:         config.SiteSubSys,
+			Description: "label the server and its location",
		},
		config.HelpKV{
			Key: config.CacheSubSys,
@@ -98,6 +101,10 @@ func initHelp() {
			Key:         config.IdentityLDAPSubSys,
			Description: "enable LDAP SSO support",
		},
+		config.HelpKV{
+			Key:         config.IdentityTLSSubSys,
+			Description: "enable X.509 TLS certificate SSO support",
+		},
		config.HelpKV{
			Key:         config.PolicyOPASubSys,
			Description: "[DEPRECATED] enable external OPA for policy enforcement",
@@ -179,6 +186,12 @@ func initHelp() {
			Description:     "publish bucket notifications to Redis datastores",
			MultipleTargets: true,
		},
+		config.HelpKV{
+			Key:         config.SubnetSubSys,
+			Type:        "string",
+			Description: "set subnet config for the cluster e.g. api key",
+			Optional:    true,
+		},
	}

	if globalIsErasure {
@@ -190,8 +203,9 @@ func initHelp() {
		}
	}

-	var helpMap = map[string]config.HelpKVS{
+	helpMap := map[string]config.HelpKVS{
		"":                          helpSubSys, // Help for all sub-systems.
+		config.SiteSubSys:           config.SiteHelp,
		config.RegionSubSys:         config.RegionHelp,
		config.APISubSys:            api.Help,
		config.StorageClassSubSys:   storageclass.Help,
@@ -202,6 +216,7 @@ func initHelp() {
		config.ScannerSubSys:        scanner.Help,
		config.IdentityOpenIDSubSys: openid.Help,
		config.IdentityLDAPSubSys:   xldap.Help,
+		config.IdentityTLSSubSys:    xtls.Help,
		config.PolicyOPASubSys:      opa.Help,
		config.LoggerWebhookSubSys:  logger.Help,
		config.AuditWebhookSubSys:   logger.HelpWebhook,
@@ -216,9 +231,20 @@ func initHelp() {
		config.NotifyRedisSubSys:    notify.HelpRedis,
		config.NotifyWebhookSubSys:  notify.HelpWebhook,
		config.NotifyESSubSys:       notify.HelpES,
+		config.SubnetSubSys:         subnet.HelpSubnet,
	}

	config.RegisterHelpSubSys(helpMap)
+
+	// save top-level help for deprecated sub-systems in a separate map.
+	deprecatedHelpKVMap := map[string]config.HelpKV{
+		config.RegionSubSys: {
+			Key:         config.RegionSubSys,
+			Description: "[DEPRECATED - use `site` instead] label the location of the server",
+		},
+	}
+
+	config.RegisterHelpDeprecatedSubSys(deprecatedHelpKVMap)
}

@@ -227,60 +253,55 @@ var (
	globalServerConfigMu sync.RWMutex
)

func validateConfig(s config.Config, setDriveCounts []int) error {
	// We must have a global lock for this so nobody else modifies env while we do.
	defer env.LockSetEnv()()

	// Disable merging env values with config for validation.
	env.SetEnvOff()

	// Enable env values to validate KMS.
	defer env.SetEnvOn()

	if _, err := config.LookupCreds(s[config.CredentialsSubSys][config.Default]); err != nil {
		return err
	}

	if _, err := config.LookupRegion(s[config.RegionSubSys][config.Default]); err != nil {
		return err
	}

	if _, err := api.LookupConfig(s[config.APISubSys][config.Default]); err != nil {
		return err
	}

	if globalIsErasure {
		for _, setDriveCount := range setDriveCounts {
			if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount); err != nil {
				return err
func validateSubSysConfig(s config.Config, subSys string, objAPI ObjectLayer) error {
	switch subSys {
	case config.CredentialsSubSys:
		if _, err := config.LookupCreds(s[config.CredentialsSubSys][config.Default]); err != nil {
			return err
		}
	case config.SiteSubSys:
		if _, err := config.LookupSite(s[config.SiteSubSys][config.Default], s[config.RegionSubSys][config.Default]); err != nil {
			return err
		}
	case config.APISubSys:
		if _, err := api.LookupConfig(s[config.APISubSys][config.Default]); err != nil {
			return err
		}
	case config.StorageClassSubSys:
		if globalIsErasure {
			if objAPI == nil {
				return errServerNotInitialized
			}
			for _, setDriveCount := range objAPI.SetDriveCounts() {
				if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount); err != nil {
					return err
				}
			}
		}
	}

	if _, err := cache.LookupConfig(s[config.CacheSubSys][config.Default]); err != nil {
		return err
	}

	compCfg, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default])
	if err != nil {
		return err
	}
	objAPI := newObjectLayerFn()
	if objAPI != nil {
		if compCfg.Enabled && !objAPI.IsCompressionSupported() {
			return fmt.Errorf("Backend does not support compression")
	case config.CacheSubSys:
		if _, err := cache.LookupConfig(s[config.CacheSubSys][config.Default]); err != nil {
			return err
		}
	case config.CompressionSubSys:
		compCfg, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default])
		if err != nil {
			return err
		}
	}

	if _, err = heal.LookupConfig(s[config.HealSubSys][config.Default]); err != nil {
		return err
	}

	if _, err = scanner.LookupConfig(s[config.ScannerSubSys][config.Default]); err != nil {
		return err
	}

	{
		if objAPI != nil {
			if compCfg.Enabled && !objAPI.IsCompressionSupported() {
				return fmt.Errorf("Backend does not support compression")
			}
		}
	case config.HealSubSys:
		if _, err := heal.LookupConfig(s[config.HealSubSys][config.Default]); err != nil {
			return err
		}
	case config.ScannerSubSys:
		if _, err := scanner.LookupConfig(s[config.ScannerSubSys][config.Default]); err != nil {
			return err
		}
	case config.EtcdSubSys:
		etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
		if err != nil {
			return err
@@ -292,15 +313,13 @@ func validateConfig(s config.Config, setDriveCounts []int) error {
			}
			etcdClnt.Close()
		}
	}
	if _, err := openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
		NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
		return err
	}

	{
		cfg, err := xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default],
			globalRootCAs)
	case config.IdentityOpenIDSubSys:
		if _, err := openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
			NewGatewayHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil {
			return err
		}
	case config.IdentityLDAPSubSys:
		cfg, err := xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default], globalRootCAs)
		if err != nil {
			return err
		}
@@ -311,21 +330,61 @@
		}
		conn.Close()
	}
	case config.IdentityTLSSubSys:
		if _, err := xtls.Lookup(s[config.IdentityTLSSubSys][config.Default]); err != nil {
			return err
		}
	case config.SubnetSubSys:
		if _, err := subnet.LookupConfig(s[config.SubnetSubSys][config.Default]); err != nil {
			return err
		}
	case config.PolicyOPASubSys:
		if _, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
			NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
			return err
		}
	default:
		if config.LoggerSubSystems.Contains(subSys) {
			if err := logger.ValidateSubSysConfig(s, subSys); err != nil {
				return err
			}
		}
	}

	if _, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
		NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
		return err
	if config.NotifySubSystems.Contains(subSys) {
		if err := notify.TestSubSysNotificationTargets(GlobalContext, s, NewGatewayHTTPTransport(), globalNotificationSys.ConfiguredTargetIDs(), subSys); err != nil {
			return err
		}
	}

	if _, err := logger.LookupConfig(s); err != nil {
		return err
	}

	return notify.TestNotificationTargets(GlobalContext, s, NewGatewayHTTPTransport(), globalNotificationSys.ConfiguredTargetIDs())
	return nil
}

func lookupConfigs(s config.Config, setDriveCounts []int) {
func validateConfig(s config.Config, subSys string) error {
	objAPI := newObjectLayerFn()

	// We must have a global lock for this so nobody else modifies env while we do.
	defer env.LockSetEnv()()

	// Disable merging env values with config for validation.
	env.SetEnvOff()

	// Enable env values to validate KMS.
	defer env.SetEnvOn()
	if subSys != "" {
		return validateSubSysConfig(s, subSys, objAPI)
	}

	// No sub-system passed. Validate all of them.
	for _, ss := range config.SubSystems.ToSlice() {
		if err := validateSubSysConfig(s, ss, objAPI); err != nil {
			return err
		}
	}

	return nil
}

func lookupConfigs(s config.Config, objAPI ObjectLayer) {
	ctx := GlobalContext

	var err error
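The refactor above changes the validation entry point: callers can now validate one named sub-system, or pass an empty string to validate all of them. A hedged sketch of that dispatch pattern in isolation; the names are illustrative, not MinIO's:

```go
package main

import "fmt"

// validate checks one named sub-system, or every registered one when name is "".
func validate(subSystems []string, name string, check func(string) error) error {
	if name != "" {
		return check(name)
	}
	for _, ss := range subSystems {
		if err := check(ss); err != nil {
			return fmt.Errorf("%s: %w", ss, err)
		}
	}
	return nil
}

func main() {
	subSystems := []string{"api", "cache", "scanner"}
	check := func(ss string) error { fmt.Println("validating", ss); return nil }
	_ = validate(subSystems, "", check)    // validate everything
	_ = validate(subSystems, "api", check) // validate just one
}
```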
@@ -337,7 +396,15 @@ func lookupConfigs(s config.Config, setDriveCounts []int) {
		}
	}

-	if dnsURL, dnsUser, dnsPass, ok := env.LookupEnv(config.EnvDNSWebhook); ok {
+	dnsURL, dnsUser, dnsPass, err := env.LookupEnv(config.EnvDNSWebhook)
+	if err != nil {
+		if globalIsGateway {
+			logger.FatalIf(err, "Unable to initialize remote webhook DNS config")
+		} else {
+			logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err))
+		}
+	}
+	if err == nil && dnsURL != "" {
		globalDNSConfig, err = dns.NewOperatorDNS(dnsURL,
			dns.Authentication(dnsUser, dnsPass),
			dns.RootCAs(globalRootCAs))
@@ -402,9 +469,9 @@
	// but not federation.
	globalBucketFederation = etcdCfg.PathPrefix == "" && etcdCfg.Enabled

-	globalServerRegion, err = config.LookupRegion(s[config.RegionSubSys][config.Default])
+	globalSite, err = config.LookupSite(s[config.SiteSubSys][config.Default], s[config.RegionSubSys][config.Default])
	if err != nil {
-		logger.LogIf(ctx, fmt.Errorf("Invalid region configuration: %w", err))
+		logger.LogIf(ctx, fmt.Errorf("Invalid site configuration: %w", err))
	}

	apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default])
@@ -412,14 +479,13 @@
		logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
	}

-	globalAPIConfig.init(apiConfig, setDriveCounts)
-
	// Initialize remote instance transport once.
	getRemoteInstanceTransportOnce.Do(func() {
		getRemoteInstanceTransport = newGatewayHTTPTransport(apiConfig.RemoteTransportDeadline)
	})

-	if globalIsErasure {
+	if globalIsErasure && objAPI != nil {
+		setDriveCounts := objAPI.SetDriveCounts()
		for i, setDriveCount := range setDriveCounts {
			sc, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount)
			if err != nil {
@@ -457,8 +523,17 @@
		logger.Fatal(errors.New("no KMS configured"), "MINIO_KMS_AUTO_ENCRYPTION requires a valid KMS configuration")
	}

+	globalSTSTLSConfig, err = xtls.Lookup(s[config.IdentityTLSSubSys][config.Default])
+	if err != nil {
+		logger.LogIf(ctx, fmt.Errorf("Unable to initialize X.509/TLS STS API: %w", err))
+	}

+	if globalSTSTLSConfig.InsecureSkipVerify {
+		logger.LogIf(ctx, fmt.Errorf("CRITICAL: enabling %s is not recommended in a production environment", xtls.EnvIdentityTLSSkipVerify))
+	}

	globalOpenIDConfig, err = openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
-		NewGatewayHTTPTransport(), xhttp.DrainBody)
+		NewGatewayHTTPTransport(), xhttp.DrainBody, globalSite.Region)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err))
	}
@@ -478,46 +553,9 @@
		logger.LogIf(ctx, fmt.Errorf("Unable to parse LDAP configuration: %w", err))
	}

-	// Load logger targets based on user's configuration
-	loggerUserAgent := getUserAgent(getMinioMode())
-
-	loggerCfg, err := logger.LookupConfig(s)
+	globalSubnetConfig, err = subnet.LookupConfig(s[config.SubnetSubSys][config.Default])
	if err != nil {
-		logger.LogIf(ctx, fmt.Errorf("Unable to initialize logger: %w", err))
-	}
-
-	for _, l := range loggerCfg.HTTP {
-		if l.Enabled {
-			l.LogOnce = logger.LogOnceIf
-			l.UserAgent = loggerUserAgent
-			l.Transport = NewGatewayHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)
-			// Enable http logging
-			if err = logger.AddTarget(http.New(l)); err != nil {
-				logger.LogIf(ctx, fmt.Errorf("Unable to initialize console HTTP target: %w", err))
-			}
-		}
-	}
-
-	for _, l := range loggerCfg.AuditWebhook {
-		if l.Enabled {
-			l.LogOnce = logger.LogOnceIf
-			l.UserAgent = loggerUserAgent
-			l.Transport = NewGatewayHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)
-			// Enable http audit logging
-			if err = logger.AddAuditTarget(http.New(l)); err != nil {
-				logger.LogIf(ctx, fmt.Errorf("Unable to initialize audit HTTP target: %w", err))
-			}
-		}
-	}
-
-	for _, l := range loggerCfg.AuditKafka {
-		if l.Enabled {
-			l.LogOnce = logger.LogOnceIf
-			// Enable Kafka audit logging
-			if err = logger.AddAuditTarget(kafka.New(l)); err != nil {
-				logger.LogIf(ctx, fmt.Errorf("Unable to initialize audit Kafka target: %w", err))
-			}
-		}
+		logger.LogIf(ctx, fmt.Errorf("Unable to parse subnet configuration: %w", err))
	}

	globalConfigTargetList, err = notify.GetNotificationTargets(GlobalContext, s, NewGatewayHTTPTransport(), false)
@@ -531,68 +569,123 @@
	}

	// Apply dynamic config values
	logger.LogIf(ctx, applyDynamicConfig(ctx, newObjectLayerFn(), s))
	if err := applyDynamicConfig(ctx, objAPI, s); err != nil {
		if globalIsGateway {
			logger.FatalIf(err, "Unable to initialize dynamic configuration")
		} else {
			logger.LogIf(ctx, err)
		}
	}
}

func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s config.Config, subSys string) error {
	switch subSys {
	case config.APISubSys:
		apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default])
		if err != nil {
			logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
		}
		var setDriveCounts []int
		if objAPI != nil {
			setDriveCounts = objAPI.SetDriveCounts()
		}
		globalAPIConfig.init(apiConfig, setDriveCounts)
	case config.CompressionSubSys:
		cmpCfg, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default])
		if err != nil {
			return fmt.Errorf("Unable to setup Compression: %w", err)
		}
		// Validate if the object layer supports compression.
		if objAPI != nil {
			if cmpCfg.Enabled && !objAPI.IsCompressionSupported() {
				return fmt.Errorf("Backend does not support compression")
			}
		}
		globalCompressConfigMu.Lock()
		globalCompressConfig = cmpCfg
		globalCompressConfigMu.Unlock()
	case config.HealSubSys:
		healCfg, err := heal.LookupConfig(s[config.HealSubSys][config.Default])
		if err != nil {
			return fmt.Errorf("Unable to apply heal config: %w", err)
		}
		globalHealConfig.Update(healCfg)
	case config.ScannerSubSys:
		scannerCfg, err := scanner.LookupConfig(s[config.ScannerSubSys][config.Default])
		if err != nil {
			return fmt.Errorf("Unable to apply scanner config: %w", err)
		}
		// update dynamic scanner values.
		scannerCycle.Update(scannerCfg.Cycle)
		logger.LogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))
	case config.LoggerWebhookSubSys:
		loggerCfg, err := logger.LookupConfigForSubSys(s, config.LoggerWebhookSubSys)
		if err != nil {
			logger.LogIf(ctx, fmt.Errorf("Unable to load logger webhook config: %w", err))
		}
		userAgent := getUserAgent(getMinioMode())
		for n, l := range loggerCfg.HTTP {
			if l.Enabled {
				l.LogOnce = logger.LogOnceIf
				l.UserAgent = userAgent
				l.Transport = NewGatewayHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)
				loggerCfg.HTTP[n] = l
			}
		}
		err = logger.UpdateSystemTargets(loggerCfg)
		if err != nil {
			logger.LogIf(ctx, fmt.Errorf("Unable to update logger webhook config: %w", err))
		}
	case config.AuditWebhookSubSys:
		loggerCfg, err := logger.LookupConfigForSubSys(s, config.AuditWebhookSubSys)
		if err != nil {
			logger.LogIf(ctx, fmt.Errorf("Unable to load audit webhook config: %w", err))
		}
		userAgent := getUserAgent(getMinioMode())
		for n, l := range loggerCfg.AuditWebhook {
			if l.Enabled {
				l.LogOnce = logger.LogOnceIf
				l.UserAgent = userAgent
				l.Transport = NewGatewayHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)
				loggerCfg.AuditWebhook[n] = l
			}
		}

		err = logger.UpdateAuditWebhookTargets(loggerCfg)
		if err != nil {
			logger.LogIf(ctx, fmt.Errorf("Unable to update audit webhook targets: %w", err))
		}
	case config.AuditKafkaSubSys:
		loggerCfg, err := logger.LookupConfigForSubSys(s, config.AuditKafkaSubSys)
		if err != nil {
			logger.LogIf(ctx, fmt.Errorf("Unable to load audit kafka config: %w", err))
		}
		for n, l := range loggerCfg.AuditKafka {
			if l.Enabled {
				l.LogOnce = logger.LogOnceIf
				loggerCfg.AuditKafka[n] = l
			}
		}
		err = logger.UpdateAuditKafkaTargets(loggerCfg)
		if err != nil {
			logger.LogIf(ctx, fmt.Errorf("Unable to update audit kafka targets: %w", err))
		}
	}
	globalServerConfigMu.Lock()
	defer globalServerConfigMu.Unlock()
	if globalServerConfig != nil {
		globalServerConfig[subSys] = s[subSys]
	}
	return nil
}

// applyDynamicConfig will apply dynamic config values.
// Dynamic systems should be in config.SubSystemsDynamic as well.
func applyDynamicConfig(ctx context.Context, objAPI ObjectLayer, s config.Config) error {
	if objAPI == nil {
		return nil
	}

	// Read all dynamic configs.
	// API
	apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default])
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
	}

	// Compression
	cmpCfg, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default])
	if err != nil {
		return fmt.Errorf("Unable to setup Compression: %w", err)
	}

	// Validate if the object layer supports compression.
	if cmpCfg.Enabled && !objAPI.IsCompressionSupported() {
		return fmt.Errorf("Backend does not support compression")
	}

	// Heal
	healCfg, err := heal.LookupConfig(s[config.HealSubSys][config.Default])
	if err != nil {
		return fmt.Errorf("Unable to apply heal config: %w", err)
	}

	// Scanner
	scannerCfg, err := scanner.LookupConfig(s[config.ScannerSubSys][config.Default])
	if err != nil {
		return fmt.Errorf("Unable to apply scanner config: %w", err)
	}

	// Apply configurations.
	// We should not fail after this.
	globalAPIConfig.init(apiConfig, objAPI.SetDriveCounts())

	globalCompressConfigMu.Lock()
	globalCompressConfig = cmpCfg
	globalCompressConfigMu.Unlock()

	globalHealConfigMu.Lock()
	globalHealConfig = healCfg
	globalHealConfigMu.Unlock()

	// update dynamic scanner values.
	scannerCycle.Update(scannerCfg.Cycle)
	logger.LogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))

	// Update all dynamic config values in memory.
	globalServerConfigMu.Lock()
	defer globalServerConfigMu.Unlock()
	if globalServerConfig != nil {
		for k := range config.SubSystemsDynamic {
			globalServerConfig[k] = s[k]
	for subSys := range config.SubSystemsDynamic {
		err := applyDynamicConfigForSubSys(ctx, objAPI, s, subSys)
		if err != nil {
			return err
		}
	}
	return nil
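Each dynamic sub-system now follows the same shape: look up the new values, apply them to live state, and only then record the section under the config mutex. A generic sketch of that pattern under stated assumptions (the types and names are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

type config map[string]string

var (
	mu      sync.Mutex
	current = config{}
)

// applySubSys applies one sub-system's new settings and, only on success,
// records them as current, mirroring applyDynamicConfigForSubSys above.
func applySubSys(next config, subSys string, apply func(string) error) error {
	if err := apply(next[subSys]); err != nil {
		return err
	}
	mu.Lock()
	defer mu.Unlock()
	current[subSys] = next[subSys]
	return nil
}

func main() {
	next := config{"scanner": "delay=2s"}
	err := applySubSys(next, "scanner", func(v string) error {
		fmt.Println("applying scanner settings:", v)
		return nil
	})
	fmt.Println("err:", err, "current:", current)
}
```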
@@ -620,7 +713,10 @@ func GetHelp(subSys, key string, envOnly bool) (Help, error) {

	subSysHelp, ok := config.HelpSubSysMap[""].Lookup(subSys)
	if !ok {
-		return Help{}, config.Errorf("unknown sub-system %s", subSys)
+		subSysHelp, ok = config.HelpDeprecatedSubSysMap[subSys]
+		if !ok {
+			return Help{}, config.Errorf("unknown sub-system %s", subSys)
+		}
	}

	h, ok := config.HelpSubSysMap[subSys]
@@ -642,9 +738,7 @@
	// to list the ENV, for regular k/v EnableKey is
	// implicit, for ENVs we cannot make it implicit.
	if subSysHelp.MultipleTargets {
-		envK := config.EnvPrefix + strings.Join([]string{
-			strings.ToTitle(subSys), strings.ToTitle(madmin.EnableKey),
-		}, config.EnvWordDelimiter)
+		envK := config.EnvPrefix + strings.ToTitle(subSys) + config.EnvWordDelimiter + strings.ToTitle(madmin.EnableKey)
		envHelp = append(envHelp, config.HelpKV{
			Key:         envK,
			Description: fmt.Sprintf("enable %s target, default is 'off'", subSys),
@@ -653,9 +747,7 @@
		})
	}
	for _, hkv := range h {
-		envK := config.EnvPrefix + strings.Join([]string{
-			strings.ToTitle(subSys), strings.ToTitle(hkv.Key),
-		}, config.EnvWordDelimiter)
+		envK := config.EnvPrefix + strings.ToTitle(subSys) + config.EnvWordDelimiter + strings.ToTitle(hkv.Key)
		envHelp = append(envHelp, config.HelpKV{
			Key:         envK,
			Description: hkv.Description,
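Both rewrites in `GetHelp` flatten the same concatenation: the derived environment key is the prefix, the upper-cased sub-system, a delimiter, and the upper-cased key. A small sketch with assumed values for the two constants (MinIO's prefix is `MINIO_` and the delimiter `_`):

```go
package main

import (
	"fmt"
	"strings"
)

const (
	envPrefix        = "MINIO_" // assumed value of config.EnvPrefix
	envWordDelimiter = "_"      // assumed value of config.EnvWordDelimiter
)

func envKey(subSys, key string) string {
	return envPrefix + strings.ToTitle(subSys) + envWordDelimiter + strings.ToTitle(key)
}

func main() {
	fmt.Println(envKey("scanner", "delay")) // MINIO_SCANNER_DELAY
}
```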
@@ -706,7 +798,7 @@ func loadConfig(objAPI ObjectLayer) error {
	}

	// Override any values from ENVs.
-	lookupConfigs(srvCfg, objAPI.SetDriveCounts())
+	lookupConfigs(srvCfg, objAPI)

	// hold the mutex lock before a new config is assigned.
	globalServerConfigMu.Lock()
Some files were not shown because too many files have changed in this diff.