Compare commits
582 Commits: RELEASE.20... → RELEASE.20...

Commit SHA1s:

e3071157f0, c07af89e48, 9c846106fa, cf94d1f1f1, 6187440f35, 57b7c3494f,
dda18c28c5, f8d6eaaa96, 47d4fabb58, 80039f60d5, 5a5e9b8a89, b7ed3b77bd,
75b925c326, 91d419ee6c, 23345098ea, 7ce91ea1a1, 41079f1015, 712dfa40cd,
decfd6108c, b890bbfa63, 0e3a570b85, 7060c809c0, 9dbfd84c5b, fce380a044,
46ba15ab03, 1e39ca39c3, 80ef1ae51c, 4d0715d226, 8a274169da, 21d8298fe1,
5d6f6d8d5b, bacf6156c1, 1d1b213f1f, 1f11af42f1, a026c8748f, 9f7d89b3cd,
92a77cc78e, b0c84e3de7, bbc914e174, 3fca4055d2, 66afa16aed, 9b0a8de7de,
04bbede17d, 0e3bafcc54, b48f719b8e, 289fcbd08c, f6875bb893, 7e803adf13,
5b5deee5b3, 7dae4cb685, 27fad98179, 58f7e3a829, 4a15bd8ff8, b030ef1aca,
cc46a99f97, becec6cb6b, bc33db9fc0, 7d4579e737, b7c90751b0, 88fd1cba71,
e43cc316ff, e3f24a29fa, 890e526bde, 16ce455fca, 29b7164468, acdd03f609,
03b35ecdd0, 5307e18085, 2cea944cdb, c08540c7b7, 3934700a08, 0913eb6655,
1bfbe354f5, 2d78e20120, 77210513c9, 2e6f8bdf19, 25144fedd5, 27f64dd9a4,
9d7648f02f, 1ef8babfef, 4ea7bf0510, 5dcf1d13a9, 94d37d05e5, 0cbdc458c5,
c1437c7b46, f357f65d04, ef8e952fc4, a2bc383e15, 23930355a7, bb9f41e613,
bc110d8055, b23b19e5c3, 65b1a4282e, 1dbb3f6f43, af3dc25dfe, 5a0c0079a1,
af8f563ed3, 93af4a4864, 28f188e3ef, b29224f62f, d756da41b9, cdab4a3b85,
b88c57ba93, 1a5496eced, b264e6a191, ae1b495262, 16939ca192, 60cd513a33,
27d94c64ed, 21a0f857d3, 03a6e8aee2, d0862ddf86, 4afbb89774, 5ec57a9533,
f088e8960b, 27dec42ad6, b70053090c, f10e2254ae, 1f92fc3fc0, f71b114a84,
e3e0532613, 2c0f121550, 6f41cff75a, 9b39616c1b, fad3d66093, ff99ef74c8,
6990e73b11, 860a1237ab, 97b5bf1fb7, ed3418c046, a2230868e0, 71bab74148,
661ea57907, 1f18efb0ba, 8ae46bce93, f19a414e09, 0ee2933234, 9890f579f8,
2ee337ead5, 362e14fa1a, 524fe62594, 3c87e1e60d, 0cac868a36, 2480c66857,
186c477f3c, 570670be8c, 22b7226581, f16f715b59, 75adb787c4, 6123377e66,
778cccb15d, 0256dae657, 88a93838de, 0855988427, 84b121bbe1, 48fb7b0dd7,
01e550a9be, 63a2e0bab6, 24657859a8, 67d07e895c, d7df6bc738, a4e1de93a7,
41be557f0c, 9417fd933e, 067d21d0f2, 3882da6ac5, 77b780b8ca, 127e8bf3b6,
74faed166a, dbd05d6e82, b5d35c7e09, c39eb3bacd, 57fad9148c, 0f88cdc80e,
e2a9949b16, 38e3c7a8f7, 67f166fa02, c7df5fb119, a4be47d7ad, aaea94a48d,
c3d9c45f58, a2a48cc065, cf407f7176, d6dd17a483, a66071099c, 9a6e569412,
7dfa565d00, d2e5f01542, f4e373e0d2, c8691db2b7, affe51cb19, 57118919d2,
7db05a80dd, a8ba71edef, 45a99c3fd3, 58e6b83e95, f556a72fe2, cd7a5cab8a,
67b5e0dbe8, b68f0cbde4, ebc3627c73, 295730408b, 5a9f133491, f30afa4956,
171cedf0f0, 27d8ef14f8, f6d13f57bb, 5f36167f1a, 8fb4ae916c, 48da4aeee0,
07df9eecda, 7f214a0e46, e1a0a1e73c, 1278b0ec73, 3e9bd931ed, 9d588319dd,
0012ca8ca5, 288e276abe, f22e745514, 070c31eac5, 1a56ebea70, 70e1cbda21,
1ede3967c1, 60f2df54e0, ba708f51f2, b106b1c131, 0df31f63ab, 64d4da5a37,
7aec38a73e, a2fd8caa69, f546636c52, 38ccc4f672, 04e669a6be, cc3f139d1f,
d50442da01, 404b05a44c, 3d7c1ad31d, d300e775a6, 7ee2d1c339, 54a98773f8,
737a3f0bad, 3bd9636a5b, 76b21de0c6, dabb058167, f394313fee, b7c5e45fff,
0a224654c2, 47e4a36d7e, e420a1de4d, 62dc0f7698, 2d31d92271, 1981fe2072,
f68bd37acf, 76877eb6fa, 3d66d053c7, 0d3ae3810f, 0e31cff762, c27110e37d,
89441a22aa, 557135185c, 4d39fd4165, f4c03e56b8, d2b6aa9033, 5dd40b9377,
001b77e7e1, 9d91d32d82, a60ac7ca17, 42ba0da6b0, f527c708f2, 6f474982ed,
fd6cd52728, c9e49f4366, 46fd9f4a53, 4da641533d, 79df2c7ce7, 3e28af1723,
866a95de38, 6aa0574a53, bb97eafa82, 51d5efee1b, c980804514, b883803b21,
7e3a7d7044, 9ad6012782, 5a96cbbeaa, 416977436e, 54ec0a1308, ebd78e983f,
1cf726348f, 41f75e6d1b, 0e3037631f, 6fbf4f96b6, e35709a99e, f3602d7d08,
526e10a2e0, 0b21734571, 499872f31d, 5cc16e098c, 364e27d5f2, 1f4e0bd17c,
0557e18472, cfd66ab8c3, 691b763613, 3ddb501190, 997e808088, 13441ad0f8,
aa508591c1, 818f0201fc, 890f43ffa5, e270ab65b3, 926373f9c1, 111c6177d2,
91f72f25ab, da540ccf8c, d6396f82fe, 4fa250a6a1, b42cfcea60, aca6dfbd60,
5f7e6d03ff, 88ad742da0, a8d4042853, 44a9339c0a, 113c7ff49a, 40dbe243d9,
d422d24278, 109c927dad, de400f3473, 8144a125ce, 3e34e41a5a, 2270887d43,
c6431f9a04, 88c0d0120c, 44fefe5b9f, 878d368cea, f2bd026d0e, 518612492c,
81e43b87c2, a02e17f15c, c76f86fdbd, 5b7c00ff52, b9f0046ee7, 3b79f7e4ae,
85d2df02b9, 2f1e8ba612, 84c690cb07, 239bbad7ab, 4be8023408, 83e8da57b8,
dcff6c996d, 0a66a6f1e5, e82a5c5c54, 92fdcafb66, b9aae1aaae, 7d70afc937,
12b63061c2, 038fdeea83, 0b6225bcc3, f286ef8e17, b120bcb60a, be34fc9134,
8591d17d82, f6190d6751, f0fc77fded, f56cac6381, 4f35054d29, d29df6714a,
7460fb8349, a7c430355a, 1df1517449, f7c357ebad, 20c60aae68, b14527b7af,
f840080e5b, 2c6983a2f1, acfb83ec5e, 3db931dc0e, 8309ddd486, 21c868a646,
ffe9acfe4a, 24d904d194, b280a37c4d, 906548d0ba, 1485a5bf3b, 9ec197f2e8,
d21466f595, 4f3290309e, d6fe0f61a9, 5a22f2cf0b, 42d11d9e7d, e49c184595,
99d87c5ca2, 4c0f48c548, 4ce6d35e30, 81bf0c66c6, 34dc725d26, a5db4ca092,
61029fe20b, fee3f88cb5, 932500e43d, 55d4cdd464, fe3e47b1e8, 9ca25bd48f,
91e0823ff0, 142c6b11b3, ef0b8367b5, d1bfb4d2c0, 3b5d6f003f, 26c457860b,
d0515031c7, 08f4a0a816, 28f95f1fbe, c791de0e1e, 36b5426f6e, 1e72e9b1cd,
9739e55d0f, 1cddbc80cf, 3da9ee15d3, 1e2fac054c, 914bfb2d9c, 40244994ad,
556ae07857, 17fd71164c, 81d19156e9, 7b82411e6f, fb268add7a, 7700973538,
54e25a0251, 79b3a1fe4e, faf013ec84, 7152915318, 886262e58a, 9c5d9ae376,
3d2bc15e9a, 20c43c447d, 4caed7cc0d, 5b68f8ea6a, 8378bc9958, 367cb48096,
661b263e77, 07c5e72cdb, 7752cdbfaf, 4545ecad58, ac74237f01, 5a36179c19,
82d73f387d, e8c6314770, 087c1b98dc, 68c5ad83fb, 5acc8c0134, c897b6a82d,
d008e90d50, ea820b30bf, 03725dc015, 0a6f9bc1eb, 1946922de3, edf1f4233b,
f4b55ea7a7, 8dfd1f03e9, acf26c5ab7, d9800c8135, 02bef7560f, 07dd0692b6,
4f3317effe, 9afdbe3648, fe0df01448, 12e6907512, 5aef492b4c, 5d7ed8ff7d,
b1754fc5ff, 19bbf3e142, e1755275a0, 520037e721, cbb0828ab8, 8774d10bdf,
df9f479d58, 8bb52c9c2a, 947c423824, c3d24fb26d, 112f9ae087, 01b9ff54d9,
64a1904136, bce6864785, ecd54b4cba, ca2b288a4b, 1016fbb8f9, be3f81c7ec,
9f3c151c3c, 9735f3d8f2, 34680c5ccf, 58934e5881, ad3f98b8e7, 7c33a33ef3,
3dfcca68e6, 73b74c94a1, 18338d60d5, e106070640, 091a7ae359, 70160aeab3,
1aa08f594d, 14d8a931fe, 30ba85bc67, caadcc3ed8, 26f55472c6, 79a58e275c,
900e584514, bb639d9f29, 15dcacc1fc, 6d53e3c2d7, 8ed7346273, 3c1220adca,
4ed0eb7012, 2af5445309, abb1916bda, 9424dca9e4, db84bb9bd3, c603f85488,
2f1ee25f50, 7bdf9005e5, d9c1d79e30, bd88b86919, 8e29ae8c44, d158607f8e,
939fbb3c38, 3b9dfa9d29, 0c76fb57f2, 9694fa8d3a, 20761e053e, 29d885b40f,
087dc13965, e7f559c582, 52c5f6e152, 26ca59859f, 23d6770ff9, ac36a377b0,
1642867136, ce40392803, 5f1af8a69d, c57ff2640e, d7fd396b7c, 45d145a823,
221ef78faa, d86513cbba, 25b5904b84, c2eb60df4a, feabd0430c, 838de23357,
d7b7040408, 44e4bdc6f4, 779060bc16, 76239fa1ae, 5e53f767c4, 974073a2e5,
d693431183, bedf739d16, 082755de1a, 6299e42aa9, 129f41cee9, 415bbc74aa,
13e41f2c68, 1e117b780a, f8c5c24159, f5a55c44d4, 91a0e7bdaa, b07e309627,
9ea45399ce, c19b1a143e, 02c24a860d, 9f652708ee, 05fa790584, e0db822a9b,
ec0fee6208, a188554fe1, 8d52c7daf3, c49ebaaf1a, acc9645249, 60f961dfe8,
0c48b1d993, d57b57bddc, 3837d2b94b, f81a188ef6, 8e417e28d1, 4ce6830a7b
.github/ISSUE_TEMPLATE.md (vendored, 5 changes)

@@ -7,6 +7,11 @@ assignees: ''
 ---
+
+## NOTE
+All GitHub issues are addressed on a best-effort basis at MinIO's sole discretion. There are no Service Level Agreements (SLA) or Objectives (SLO). Remember our [Code of Conduct](https://github.com/minio/minio/blob/master/code_of_conduct.md) when engaging with MinIO Engineers and the larger community.
+
+For urgent issues (e.g. production down, etc.), subscribe to [SUBNET](https://min.io/pricing?jmp=github) for direct to engineering support.
 
 <!--- Provide a general summary of the issue in the Title above -->
 
 ## Expected Behavior
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 3 changes)

@@ -7,6 +7,9 @@ assignees: ''
 ---
+
+## NOTE
+If this case is urgent, please subscribe to [Subnet](https://min.io/pricing) so that our 24/7 support team may help you faster.
 
 <!--- Provide a general summary of the issue in the Title above -->
 
 ## Expected Behavior
.github/lock.yml (vendored, file removed, 39 lines)

@@ -1,39 +0,0 @@
-# Configuration for Lock Threads - https://github.com/dessant/lock-threads-app
-
-# Number of days of inactivity before a closed issue or pull request is locked
-daysUntilLock: 365
-
-# Skip issues and pull requests created before a given timestamp. Timestamp must
-# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
-skipCreatedBefore: false
-
-# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
-exemptLabels: []
-
-# Label to add before locking, such as `outdated`. Set to `false` to disable
-lockLabel: true
-
-# Comment to post before locking. Set to `false` to disable
-lockComment: >-
-  This thread has been automatically locked since there has not been
-  any recent activity after it was closed. Please open a new issue for
-  related bugs.
-
-# Assign `resolved` as the reason for locking. Set to `false` to disable
-setLockReason: true
-
-# Limit to only `issues` or `pulls`
-only: issues
-
-# Optionally, specify configuration settings just for `issues` or `pulls`
-# issues:
-#   exemptLabels:
-#     - help-wanted
-#   lockLabel: outdated
-
-# pulls:
-#   daysUntilLock: 30
-
-# Repository to extend settings from
-# _extends: repo
.github/stale.yml (vendored, 2 changes)

@@ -14,7 +14,7 @@ onlyLabels: []
 exemptLabels:
   - "security"
   - "pending discussion"
-  - "do not close"
+  - "do-not-close"
 
 # Set to true to ignore issues in a project (defaults to false)
 exemptProjects: false
.github/workflows/go-cross.yml (vendored, 15 changes)

@@ -1,23 +1,26 @@
-name: Go
+name: Crosscompile

on:
  pull_request:
    branches:
      - master

+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
jobs:
  build:
-    name: MinIO crosscompile tests on ${{ matrix.go-version }} and ${{ matrix.os }}
+    name: Build Tests with Go ${{ matrix.go-version }} on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
-        go-version: [1.16.x, 1.17.x]
+        go-version: [1.17.x]
        os: [ubuntu-latest]
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-node@v1
        with:
          node-version: '12'
      - uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
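The workflow's build step is truncated above; the Makefile shown later in this compare keeps a `crosscompile` target that wraps `buildscripts/cross-compile.sh`, so a plausible local equivalent of what this job exercises is simply:

```sh
# Runs buildscripts/cross-compile.sh via the Makefile target; this pairing
# with the go-cross.yml job is an assumption, since the run step is cut off.
make crosscompile
```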
.github/workflows/go-healing.yml (vendored, new file, 45 lines)

@@ -0,0 +1,45 @@
name: Healing Functional Tests

on:
  pull_request:
    branches:
      - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:
  build:
    name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go-version: [1.17.x]
        os: [ubuntu-latest]
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - uses: actions/cache@v2
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-${{ matrix.go-version }}-go-
      - name: Build on ${{ matrix.os }}
        if: matrix.os == 'ubuntu-latest'
        env:
          CGO_ENABLED: 0
          GO111MODULE: on
          MINIO_KMS_SECRET_KEY: "my-minio-key:oyArl7zlPECEduNbB1KXgdzDn2Bdpvvw0l8VO51HQnY="
          MINIO_KMS_AUTO_ENCRYPTION: on
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          make verify-healing
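The hard-coded `MINIO_KMS_SECRET_KEY` above follows the `<key-name>:<base64 of a 32-byte key>` shape. As a hedged sketch of how one might produce an equivalent value locally (the key name matches the workflow; the generation command is just one common way to do it):

```sh
# Generate a KMS secret key of the same form the workflow hard-codes:
# "<key-name>:<base64-encoded 32 random bytes>".
export MINIO_KMS_SECRET_KEY="my-minio-key:$(head -c 32 /dev/urandom | base64)"
echo "$MINIO_KMS_SECRET_KEY"
```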
.github/workflows/go-lint.yml (vendored, 31 changes)

@@ -1,23 +1,47 @@
-name: Go
+name: Linters and Tests

on:
  pull_request:
    branches:
      - master

+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
jobs:
  build:
-    name: MinIO tests on ${{ matrix.go-version }} and ${{ matrix.os }}
+    name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
-        go-version: [1.16.x, 1.17.x]
+        go-version: [1.17.x]
        os: [ubuntu-latest, windows-latest]
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
+      - uses: actions/cache@v2
+        if: matrix.os == 'ubuntu-latest'
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-${{ matrix.go-version }}-go-
+      - uses: actions/cache@v2
+        if: matrix.os == 'windows-latest'
+        with:
+          path: |
+            %LocalAppData%\go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-${{ matrix.go-version }}-go-
      - name: Build on ${{ matrix.os }}
        if: matrix.os == 'windows-latest'
        env:

@@ -39,4 +63,5 @@ jobs:
          curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-${nancy_version}-linux-amd64 && chmod +x nancy
          go list -deps -json ./... | jq -s 'unique_by(.Module.Path)|.[]|select(has("Module"))|.Module' | ./nancy sleuth
          make
+          make test
          make test-race
.github/workflows/go.yml (vendored, 21 changes)

@@ -1,23 +1,37 @@
-name: Go
+name: Functional Tests

on:
  pull_request:
    branches:
      - master

+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
jobs:
  build:
-    name: MinIO Setup on ${{ matrix.go-version }} and ${{ matrix.os }}
+    name: Go ${{ matrix.go-version }} on ${{ matrix.os }} - healing
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
-        go-version: [1.16.x, 1.17.x]
+        go-version: [1.17.x]
        os: [ubuntu-latest]
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
+      - uses: actions/cache@v2
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-${{ matrix.go-version }}-go-
      - name: Build on ${{ matrix.os }}
        if: matrix.os == 'ubuntu-latest'
        env:

@@ -32,4 +46,3 @@ jobs:
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          make verify
-          make verify-healing
.github/workflows/iam-integrations.yaml (vendored, new file, 95 lines)

@@ -0,0 +1,95 @@
name: IAM integration

on:
  pull_request:
    branches:
      - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:
  iam-matrix-test:
    name: "[Go=${{ matrix.go-version }}|ldap=${{ matrix.ldap }}|etcd=${{ matrix.etcd }}|openid=${{ matrix.openid }}]"
    runs-on: ubuntu-latest

    services:
      openldap:
        image: quay.io/minio/openldap
        ports:
          - "389:389"
          - "636:636"
        env:
          LDAP_ORGANIZATION: "MinIO Inc"
          LDAP_DOMAIN: "min.io"
          LDAP_ADMIN_PASSWORD: "admin"
      etcd:
        image: "quay.io/coreos/etcd:v3.5.1"
        env:
          ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379"
          ETCD_ADVERTISE_CLIENT_URLS: "http://0.0.0.0:2379"
        ports:
          - "2379:2379"
        options: >-
          --health-cmd "etcdctl endpoint health"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
      openid:
        image: quay.io/minio/dex
        ports:
          - "5556:5556"
        env:
          DEX_LDAP_SERVER: "openldap:389"

    strategy:
      # When ldap, etcd or openid vars are empty below, those external servers
      # are turned off - i.e. if ldap="", then ldap server is not enabled for
      # the tests.
      matrix:
        go-version: [1.17.x]
        ldap: ["", "localhost:389"]
        etcd: ["", "http://localhost:2379"]
        openid: ["", "http://127.0.0.1:5556/dex"]
        exclude:
          # exclude combos where all are empty.
          - ldap: ""
            etcd: ""
            openid: ""
          # exclude combos where both ldap and openid IDPs are specified.
          - ldap: "localhost:389"
            openid: "http://127.0.0.1:5556/dex"

    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - uses: actions/cache@v2
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-${{ matrix.go-version }}-go-
      - name: Test LDAP/OpenID/Etcd combo
        env:
          LDAP_TEST_SERVER: ${{ matrix.ldap }}
          ETCD_SERVER: ${{ matrix.etcd }}
          OPENID_TEST_SERVER: ${{ matrix.openid }}
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          make test-iam
      - name: Test LDAP for automatic site replication
        if: matrix.ldap == 'localhost:389'
        run: |
          make test-site-replication-ldap
      - name: Test OIDC for automatic site replication
        if: matrix.openid == 'http://127.0.0.1:5556/dex'
        run: |
          make test-site-replication-oidc
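One cell of this matrix can be reproduced outside CI by starting the service containers by hand. The image name and environment variables below come straight from the job definition; the docker invocation itself is only a sketch:

```sh
# Start the same openldap service container the workflow declares, then run
# the LDAP-enabled slice of the IAM tests against it.
docker run -d --name openldap -p 389:389 -p 636:636 \
  -e LDAP_ORGANIZATION="MinIO Inc" \
  -e LDAP_DOMAIN="min.io" \
  -e LDAP_ADMIN_PASSWORD="admin" \
  quay.io/minio/openldap

LDAP_TEST_SERVER=localhost:389 make test-iam
```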
.github/workflows/lock.yml (vendored, new file, 24 lines)

@@ -0,0 +1,24 @@
name: 'Lock Threads'

on:
  schedule:
    - cron: '0 0 * * *'
  workflow_dispatch:

permissions:
  issues: write

concurrency:
  group: lock

jobs:
  action:
    runs-on: ubuntu-latest
    steps:
      - uses: dessant/lock-threads@v3
        with:
          github-token: ${{ github.token }}
          issue-inactive-days: '365'
          exclude-any-issue-labels: 'do-not-close'
          issue-lock-reason: 'resolved'
          log-output: true
.github/workflows/markdown-lint.yaml (vendored, new file, 25 lines)

@@ -0,0 +1,25 @@
name: Markdown Linter

on:
  pull_request:
    branches:
      - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:
  lint:
    name: Lint all docs
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v2

      - name: Lint all docs
        run: |
          npm install -g markdownlint-cli
          markdownlint --fix '**/*.md' --disable MD013 MD040
.github/workflows/replication.yaml (vendored, new file, 46 lines)

@@ -0,0 +1,46 @@
name: Multi-site replication tests

on:
  pull_request:
    branches:
      - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:
  replication-test:
    name: Replication Tests with Go ${{ matrix.go-version }}
    runs-on: ubuntu-latest

    strategy:
      matrix:
        go-version: [1.17.x]

    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - uses: actions/cache@v2
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-${{ matrix.go-version }}-go-
      - name: Test Replication
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          make test-replication
      - name: Test MinIO IDP for automatic site replication
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          make test-site-replication-minio
.github/workflows/upgrade-ci-cd.yaml (vendored, new file, 31 lines)

@@ -0,0 +1,31 @@
name: Upgrade old version tests

on:
  pull_request:
    branches:
      - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:
  build:
    name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go-version: [1.17.x]
        os: [ubuntu-latest]

    steps:
      - uses: actions/checkout@v1
      - uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}

      - name: Start upgrade tests
        run: |
          make test-upgrade
.gitignore (vendored, 13 changes)

@@ -21,4 +21,15 @@ prime/
 stage/
 .sia_temp/
 config.json
 node_modules/
+mc.*
+s3-check-md5*
+xl-meta*
+healing-*
+inspect*
+200M*
+hash-set
+minio.RELEASE*
+mc
+nancy
+inspects/*
.golangci.yml

@@ -23,12 +23,28 @@ linters:
  - structcheck
  - unconvert
  - varcheck
+  - gocritic
+  - gofumpt
+
+linters-settings:
+  gofumpt:
+    lang-version: "1.17"
+
+    # Choose whether or not to use the extra rules that are disabled
+    # by default
+    extra-rules: false

issues:
  exclude-use-default: false
  exclude:
    - should have a package comment
    - error strings should not be capitalized or end with punctuation or a newline
+    # todo fix these when we get enough time.
+    - "singleCaseSwitch: should rewrite switch statement to if statement"
+    - "unlambda: replace"
+    - "captLocal:"
+    - "ifElseChain:"
+    - "elseif:"

service:
-  golangci-lint-version: 1.20.0 # use the fixed version to not introduce new linters unexpectedly
+  golangci-lint-version: 1.43.0 # use the fixed version to not introduce new linters unexpectedly
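Because the config pins golangci-lint 1.43.0, local runs should install that exact version before linting. This mirrors the `getdeps` and `lint` targets in the Makefile diff further down:

```sh
# Install the pinned linter version, then run it with the repository's config,
# as the Makefile's getdeps and lint targets do.
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh \
  | sh -s -- -b "$(go env GOPATH)/bin" v1.43.0
"$(go env GOPATH)/bin/golangci-lint" run --build-tags kqueue --timeout=10m --config ./.golangci.yml
```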
COMPLIANCE.md (new file, 7 lines)

@@ -0,0 +1,7 @@
# AGPLv3 Compliance

We have designed MinIO as an Open Source software for the Open Source software community. This requires applications to consider whether their usage of MinIO is in compliance with the GNU AGPLv3 [license](https://github.com/minio/minio/blob/master/LICENSE).

MinIO cannot make the determination as to whether your application's usage of MinIO is in compliance with the AGPLv3 license requirements. You should instead rely on your own legal counsel or licensing specialists to audit and ensure your application is in compliance with the licenses of MinIO and all other open-source projects with which your application integrates or interacts. We understand that AGPLv3 licensing is complex and nuanced. It is for that reason we strongly encourage using experts in licensing to make any such determinations around compliance instead of relying on apocryphal or anecdotal advice.

[MinIO Commercial Licensing](https://min.io/pricing) is the best option for applications that trigger AGPLv3 obligations (e.g. open sourcing your application). Applications using MinIO - or any other OSS-licensed code - without validating their usage do so at their own risk.
CONTRIBUTING.md

@@ -7,15 +7,17 @@
Start by forking the MinIO GitHub repository, make changes in a branch and then send a pull request. We encourage pull requests to discuss code changes. Here are the steps in details:

### Setup your MinIO GitHub Repository

Fork [MinIO upstream](https://github.com/minio/minio/fork) source repository to your own personal repository. Copy the URL of your MinIO fork (you will need it for the `git clone` command below).

```sh
-$ git clone https://github.com/minio/minio
-$ go install -v
-$ ls /go/bin/minio
+git clone https://github.com/minio/minio
+go install -v
+ls /go/bin/minio
```

### Set up git remote as ``upstream``

```sh
$ cd minio
$ git remote add upstream https://github.com/minio/minio

@@ -25,13 +27,15 @@ $ git merge upstream/master
```

### Create your feature branch

Before making code changes, make sure you create a separate branch for these changes

```
-$ git checkout -b my-new-feature
+git checkout -b my-new-feature
```

### Test MinIO server changes

After your code changes, make sure

- To add test cases for the new code. If you have questions about how to do it, please ask on our [Slack](https://slack.min.io) channel.

@@ -40,29 +44,38 @@ After your code changes, make sure
- To run `make test` and `make build` completes.

### Commit changes

After verification, commit your changes. This is a [great post](https://chris.beams.io/posts/git-commit/) on how to write useful commit messages

```
-$ git commit -am 'Add some feature'
+git commit -am 'Add some feature'
```

### Push to the branch

Push your locally committed changes to the remote origin (your fork)

```
-$ git push origin my-new-feature
+git push origin my-new-feature
```

### Create a Pull Request

Pull requests can be created via GitHub. Refer to [this document](https://help.github.com/articles/creating-a-pull-request/) for detailed steps on how to create a pull request. After a Pull Request gets peer reviewed and approved, it will be merged.

## FAQs

### How does ``MinIO`` manage dependencies?

``MinIO`` uses `go mod` to manage its dependencies.

- Run `go get foo/bar` in the source folder to add the dependency to `go.mod` file.

To remove a dependency

- Edit your code and remove the import reference.
- Run `go mod tidy` in the source folder to remove dependency from `go.mod` file.
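Put together, the dependency workflow described above looks like this; the module path `github.com/example/foo` is purely illustrative:

```sh
# Add a dependency: records it in go.mod and go.sum.
go get github.com/example/foo

# Remove it again: delete the import from the source, then prune go.mod.
go mod tidy
```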
### What are the coding guidelines for MinIO?

``MinIO`` is fully conformant with Golang style. Refer: [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project. If you observe offending code, please feel free to send a pull request or ping us on [Slack](https://slack.min.io).
@@ -1,5 +1,10 @@
 FROM minio/minio:latest
 
+ENV PATH=/opt/bin:$PATH
+
+COPY ./minio /opt/bin/minio
 COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
 
 ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
 
 VOLUME ["/data"]
@@ -1,14 +1,9 @@
-FROM minio/minio:edge
+FROM minio/minio:latest
 
-LABEL maintainer="MinIO Inc <dev@min.io>"
+ENV PATH=/opt/bin:$PATH
 
-COPY minio /usr/bin/
-COPY dockerscripts/docker-entrypoint.sh /usr/bin/
-
-RUN chmod +x /usr/bin/minio && \
-    chmod +x /usr/bin/docker-entrypoint.sh
-
-EXPOSE 9000
+COPY ./minio /opt/bin/minio
+COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
 
 ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
Dockerfile.hotfix (new file, 50 lines)

@@ -0,0 +1,50 @@
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.5

ARG RELEASE

LABEL name="MinIO" \
      vendor="MinIO Inc <dev@min.io>" \
      maintainer="MinIO Inc <dev@min.io>" \
      version="${RELEASE}" \
      release="${RELEASE}" \
      summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
      description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."

ENV MINIO_ACCESS_KEY_FILE=access_key \
    MINIO_SECRET_KEY_FILE=secret_key \
    MINIO_ROOT_USER_FILE=access_key \
    MINIO_ROOT_PASSWORD_FILE=secret_key \
    MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
    MINIO_CONFIG_ENV_FILE=config.env \
    PATH=/opt/bin:$PATH

COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE

RUN \
    microdnf clean all && \
    microdnf update --nodocs && \
    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
    rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
    microdnf install minisign --nodocs && \
    mkdir -p /opt/bin && chmod -R 777 /opt/bin && \
    curl -s -q https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.${RELEASE} -o /opt/bin/minio && \
    curl -s -q https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.${RELEASE}.sha256sum -o /opt/bin/minio.sha256sum && \
    curl -s -q https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.${RELEASE}.minisig -o /opt/bin/minio.minisig && \
    microdnf clean all && \
    chmod +x /opt/bin/minio && \
    chmod +x /usr/bin/docker-entrypoint.sh && \
    chmod +x /usr/bin/verify-minio.sh && \
    /usr/bin/verify-minio.sh && \
    microdnf clean all

EXPOSE 9000

ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

VOLUME ["/data"]

CMD ["minio"]
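The `RELEASE` build-arg selects which hotfix binary the image downloads from dl.min.io. The Makefile's `docker-hotfix` target (shown below) drives this build; a direct invocation would look roughly like the following, where the release string is a placeholder:

```sh
# Build a hotfix image for a given release tag; mirrors the Makefile's
# docker-hotfix target. The RELEASE value here is hypothetical.
docker build --no-cache -f Dockerfile.hotfix \
  --build-arg RELEASE="RELEASE.2021-01-01T00-00-00Z.hotfix.abcdef123" \
  -t minio/minio:RELEASE.2021-01-01T00-00-00Z.hotfix.abcdef123 .
```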
@@ -1,4 +1,4 @@
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.4
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.5
 
 ARG TARGETARCH
 
@@ -19,7 +19,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
     MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
     MINIO_CONFIG_ENV_FILE=config.env \
-    PATH=$PATH:/opt/bin
+    PATH=/opt/bin:$PATH
 
 COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
 COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
@@ -27,8 +27,9 @@ COPY CREDITS /licenses/CREDITS
 COPY LICENSE /licenses/LICENSE
 
 RUN \
+    microdnf clean all && \
     microdnf update --nodocs && \
-    microdnf install curl ca-certificates shadow-utils util-linux iproute iputils --nodocs && \
+    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
     rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
     microdnf install minisign --nodocs && \
     mkdir -p /opt/bin && chmod -R 777 /opt/bin && \
@@ -39,7 +40,8 @@ RUN \
     chmod +x /opt/bin/minio && \
     chmod +x /usr/bin/docker-entrypoint.sh && \
     chmod +x /usr/bin/verify-minio.sh && \
-    /usr/bin/verify-minio.sh
+    /usr/bin/verify-minio.sh && \
+    microdnf clean all
 
 EXPOSE 9000
@@ -1,4 +1,4 @@
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.4
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.5
 
 ARG TARGETARCH
 
@@ -19,7 +19,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
     MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
     MINIO_CONFIG_ENV_FILE=config.env \
-    PATH=$PATH:/opt/bin
+    PATH=/opt/bin:$PATH
 
 COPY dockerscripts/verify-minio.sh /usr/bin/verify-minio.sh
 COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
@@ -27,8 +27,9 @@ COPY CREDITS /licenses/CREDITS
 COPY LICENSE /licenses/LICENSE
 
 RUN \
+    microdnf clean all && \
     microdnf update --nodocs && \
-    microdnf install curl ca-certificates shadow-utils util-linux iproute iputils --nodocs && \
+    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
     rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
     microdnf install minisign --nodocs && \
     mkdir -p /opt/bin && chmod -R 777 /opt/bin && \
@@ -39,7 +40,8 @@ RUN \
     chmod +x /opt/bin/minio && \
     chmod +x /usr/bin/docker-entrypoint.sh && \
     chmod +x /usr/bin/verify-minio.sh && \
-    /usr/bin/verify-minio.sh
+    /usr/bin/verify-minio.sh && \
+    microdnf clean all
 
 EXPOSE 9000
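Both images carry `MINIO_UPDATE_MINISIGN_PUBKEY` so the downloaded binary can be signature-checked, which `verify-minio.sh` does at build time. A hedged sketch of the equivalent manual check, assuming minisign's standard verify flags:

```sh
# Verify the downloaded binary against its .minisig using the public key
# baked into the image; minisign looks for /opt/bin/minio.minisig by default.
minisign -Vm /opt/bin/minio \
  -P "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"
```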
Makefile (69 changes)

@@ -19,9 +19,9 @@ help: ## print this help
 
 getdeps: ## fetch necessary dependencies
 	@mkdir -p ${GOPATH}/bin
-	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.40.1
-	@which msgp 1>/dev/null || (echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.3)
-	@which stringer 1>/dev/null || (echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer)
+	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.43.0
+	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.7-0.20211026165309-e818a1881b0e
+	@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest
 
 crosscompile: ## cross compile minio
 	@(env bash $(PWD)/buildscripts/cross-compile.sh)
@@ -34,45 +34,84 @@ check-gen: ## check for updated autogenerated files
 
 lint: ## runs golangci-lint suite of linters
 	@echo "Running $@ check"
-	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
-	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml
+	@${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml
 
 check: test
 test: verifiers build ## builds minio, runs linters, tests
 	@echo "Running unit tests"
-	@GOGC=25 GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
+	@CGO_ENABLED=0 go test -tags kqueue ./...
 
-test-race: verifiers build
+test-upgrade: build
+	@echo "Running minio upgrade tests"
+	@(env bash $(PWD)/buildscripts/minio-upgrade.sh)
+
+test-race: verifiers build ## builds minio, runs linters, tests (race)
 	@echo "Running unit tests under -race"
 	@(env bash $(PWD)/buildscripts/race.sh)
 
+test-iam: build ## verify IAM (external IDP, etcd backends)
+	@echo "Running tests for IAM (external IDP, etcd backends)"
+	@CGO_ENABLED=0 go test -tags kqueue -v -run TestIAM* ./cmd
+	@echo "Running tests for IAM (external IDP, etcd backends) with -race"
+	@CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd
+
+test-replication: install ## verify multi site replication
+	@echo "Running tests for replicating three sites"
+	@(env bash $(PWD)/docs/bucket/replication/setup_3site_replication.sh)
+
+test-site-replication-ldap: install ## verify automatic site replication
+	@echo "Running tests for automatic site replication of IAM (with LDAP)"
+	@(env bash $(PWD)/docs/site-replication/run-multi-site-ldap.sh)
+
+test-site-replication-oidc: install ## verify automatic site replication
+	@echo "Running tests for automatic site replication of IAM (with OIDC)"
+	@(env bash $(PWD)/docs/site-replication/run-multi-site-oidc.sh)
+
+test-site-replication-minio: install ## verify automatic site replication
+	@echo "Running tests for automatic site replication of IAM (with MinIO IDP)"
+	@(env bash $(PWD)/docs/site-replication/run-multi-site-minio-idp.sh)
+
 verify: ## verify minio various setups
 	@echo "Verifying build with race"
-	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-build.sh)
 
 verify-healing: ## verify healing and replacing disks with minio binary
 	@echo "Verify healing build with race"
-	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-healing.sh)
+	@(env bash $(PWD)/buildscripts/unaligned-healing.sh)
 
 build: checks ## builds minio to $(PWD)
 	@echo "Building minio binary to './minio'"
-	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 
 hotfix-vars:
 	$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
 	    sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#')))
-	$(eval TAG := "minio/minio:$(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD)")
-hotfix: hotfix-vars install ## builds minio binary with hotfix tags
+	$(eval VERSION := $(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD))
+	$(eval TAG := "minio/minio:$(VERSION)")
 
-docker-hotfix: hotfix checks ## builds minio docker container with hotfix tags
+hotfix: hotfix-vars install ## builds minio binary with hotfix tags
+	@mv -f ./minio ./minio.$(VERSION)
+	@minisign -qQSm ./minio.$(VERSION) -s "${CRED_DIR}/minisign.key" < "${CRED_DIR}/minisign-passphrase"
+	@sha256sum < ./minio.$(VERSION) | sed 's, -,minio.$(VERSION),g' > minio.$(VERSION).sha256sum
+
+hotfix-push: hotfix
+	@scp -q -r minio.$(VERSION)* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive/
+	@scp -q -r minio.$(VERSION)* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive/
+	@echo "Published new hotfix binaries at https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.$(VERSION)"
+
+docker-hotfix-push: docker-hotfix
+	@docker push -q $(TAG) && echo "Published new container $(TAG)"
+
+docker-hotfix: hotfix-push checks ## builds minio docker container with hotfix tags
 	@echo "Building minio docker image '$(TAG)'"
-	@docker build -t $(TAG) . -f Dockerfile.dev
+	@docker build -q --no-cache -t $(TAG) --build-arg RELEASE=$(VERSION) . -f Dockerfile.hotfix
 
 docker: build checks ## builds minio docker container
 	@echo "Building minio docker image '$(TAG)'"
-	@docker build -t $(TAG) . -f Dockerfile.dev
+	@docker build -q --no-cache -t $(TAG) . -f Dockerfile
 
 install: build ## builds minio and installs it to $GOPATH/bin.
 	@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
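The sed in `hotfix-vars` rewrites a git tag of the RELEASE form into the RFC 3339-style timestamp that `gen-ldflags.go` expects: dashes in the time portion become colons. A standalone sketch with a made-up tag:

```sh
# Hypothetical input tag -> timestamp; this is exactly the sed used above.
echo "RELEASE.2021-11-24T23-19-33Z" | \
  sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#'
# -> 2021-11-24T23:19:33Z
```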
111
README.md
111
README.md
@@ -1,13 +1,14 @@
|
||||
# MinIO Quickstart Guide
|
||||
|
||||
[](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/) [](https://github.com/minio/minio/blob/master/LICENSE)
|
||||
|
||||
[](https://min.io)
|
||||
|
||||
MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
|
||||
|
||||
This README provides quickstart instructions on running MinIO on baremetal hardware, including container-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).
|
||||
This README provides quickstart instructions on running MinIO on bare metal hardware, including container-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).
|
||||
|
||||
# Container Installation
|
||||
## Container Installation
|
||||
|
||||
Use the following commands to run a standalone MinIO server as a container.
|
||||
|
||||
@@ -16,7 +17,7 @@ require distributed deploying MinIO with Erasure Coding. For extended developmen
|
||||
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
|
||||
for more complete documentation.
|
||||
|
||||
## Stable
|
||||
### Stable
|
||||
|
||||
Run the following command to run the latest stable image of MinIO as a container using an ephemeral data volume:
|
||||
|
||||
@@ -26,22 +27,22 @@ podman run -p 9000:9000 -p 9001:9001 \
|
||||
```
|
||||
|
||||
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded
|
||||
object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
|
||||
object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the
|
||||
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
|
||||
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
|
||||
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
|
||||
see https://docs.min.io/docs/ and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
|
||||
see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
|
||||
|
||||
> NOTE: To deploy MinIO on with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the container.
|
||||
|
||||
# macOS
|
||||
## macOS
|
||||
|
||||
Use the following commands to run a standalone MinIO server on macOS.
|
||||
|
||||
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.
|
||||
|
||||
## Homebrew (recommended)
|
||||
### Homebrew (recommended)
|
||||
|
||||
Run the following command to install the latest stable MinIO package using [Homebrew](https://brew.sh/). Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
|
||||
|
||||
@@ -57,11 +58,11 @@ brew uninstall minio
|
||||
brew install minio/stable/minio
|
||||
```
|
||||
|
||||
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
|
||||
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
|
||||
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see https://docs.min.io/docs/ and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
|
||||
|
||||
## Binary Download
|
||||
### Binary Download
|
||||
|
||||
Use the following command to download and run a standalone MinIO server on macOS. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
|
||||
|
||||
@@ -71,11 +72,11 @@ chmod +x minio
|
||||
./minio server /data
|
||||
```
|
||||
|
||||
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
|
||||
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
|
||||
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see https://docs.min.io/docs/ and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
|
||||
|
||||
# GNU/Linux
|
||||
## GNU/Linux
|
||||
|
||||
Use the following command to run a standalone MinIO server on Linux hosts running 64-bit Intel/AMD architectures. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
|
||||
|
||||
@@ -91,18 +92,18 @@ The following table lists supported architectures. Replace the `wget` URL with t
|
||||
|
||||
| Architecture | URL |
|
||||
| -------- | ------ |
|
||||
| 64-bit Intel/AMD | https://dl.min.io/server/minio/release/linux-amd64/minio |
|
||||
| 64-bit ARM | https://dl.min.io/server/minio/release/linux-arm64/minio |
|
||||
| 64-bit PowerPC LE (ppc64le) | https://dl.min.io/server/minio/release/linux-ppc64le/minio |
|
||||
| IBM Z-Series (S390X) | https://dl.min.io/server/minio/release/linux-s390x/minio |
|
||||
| 64-bit Intel/AMD | <https://dl.min.io/server/minio/release/linux-amd64/minio> |
|
||||
| 64-bit ARM | <https://dl.min.io/server/minio/release/linux-arm64/minio> |
|
||||
| 64-bit PowerPC LE (ppc64le) | <https://dl.min.io/server/minio/release/linux-ppc64le/minio> |
|
||||
| IBM Z-Series (S390X) | <https://dl.min.io/server/minio/release/linux-s390x/minio> |

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` command-line tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See the [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for complete documentation.

## Microsoft Windows

To run MinIO on 64-bit Windows hosts, download the MinIO executable from the following URL:
@@ -116,31 +117,31 @@ Use the following command to run a standalone MinIO server on the Windows host.

```
minio.exe server D:\
```

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` command-line tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See the [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for complete documentation.

## Install from Source

Use the following commands to compile and run a standalone MinIO server from source. Source installation is intended only for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). The minimum required version is [go1.17](https://golang.org/dl/#stable).

```sh
GO111MODULE=on go install github.com/minio/minio@latest
```
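
`go install` places the binary in `$GOPATH/bin` (or `$HOME/go/bin` when `GOPATH` is unset). A minimal sketch for launching the freshly built server, assuming that directory is on your `PATH` and `/data` is a scratch directory:

```sh
# Run the compiled server against a local data directory
minio server /data
```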

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` command-line tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See the [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for complete documentation.

MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.

## Deployment Recommendations

### Allow port access for Firewalls

By default, MinIO uses port 9000 to listen for incoming connections. If your platform blocks this port by default, you may need to enable access to it.
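
As a hedged illustration (assuming a Linux host running `ufw`; adapt the commands to your firewall of choice), opening the port looks like:

```sh
# Allow inbound traffic on the default MinIO port, then reload the rules
ufw allow 9000/tcp
ufw reload
```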
@@ -196,18 +197,21 @@ service iptables restart
```

## Pre-existing data

When deployed on a single drive, MinIO server lets clients access any pre-existing data in the data directory. For example, if MinIO is started with the command `minio server /mnt/data`, any pre-existing data in the `/mnt/data` directory would be accessible to the clients. The same applies to all gateway backends.

## Test MinIO Connectivity

### Test using MinIO Console

MinIO Server comes with an embedded web-based object browser. Point your web browser to <http://127.0.0.1:9000> to ensure your server has started successfully.

> NOTE: MinIO runs the Console on a random port by default. If you wish to choose a specific port, use `--console-address` to pick a specific interface and port.

### Things to consider

MinIO redirects browser access requests to the configured server port (i.e. `127.0.0.1:9000`) to the configured Console port. MinIO uses the hostname or IP address specified in the request when building the redirect URL. The URL and port *must* be accessible by the client for the redirection to work.
For deployments behind a load balancer, proxy, or ingress rule where the MinIO host IP address or port is not public, use the `MINIO_BROWSER_REDIRECT_URL` environment variable to specify the external hostname for the redirect. The LB/Proxy must have rules for directing traffic to the Console port specifically.
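
A hedged one-liner, assuming the Console is reachable externally as `console.example.net` behind the proxy (the hostname is illustrative):

```sh
# Send browser redirects to the externally visible Console hostname
export MINIO_BROWSER_REDIRECT_URL="https://console.example.net"
```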
@@ -218,34 +222,40 @@ Similarly, if your TLS certificates do not have the IP SAN for the MinIO server

For example: `export MINIO_SERVER_URL="https://minio.example.net"`

| Dashboard | Creating a bucket |
| --------- | ----------------- |
|  |  |

## Test using MinIO Client `mc`

`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff, etc. It supports filesystems and Amazon S3-compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://docs.min.io/docs/minio-client-quickstart-guide) for further instructions.

## Upgrading MinIO

Upgrades require zero downtime in MinIO: all upgrades are non-disruptive and all transactions on MinIO are atomic. Upgrading all the servers simultaneously is therefore the recommended way to upgrade MinIO.

> NOTE: Updating directly from <https://dl.min.io> requires internet access; optionally, you can host mirrors at <https://my-artifactory.example.com/minio/>.

- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://docs.min.io/minio/baremetal/reference/minio-mc-admin/mc-admin-update.html):

```sh
mc admin update <minio alias, e.g., myminio>
```

> NOTE: Some releases might not allow rolling upgrades; this is always called out in the release notes, so it is generally advised to read the release notes before upgrading. In such a situation, `mc admin update` is the recommended mechanism to upgrade all servers at once.

- For deployments without external internet access (e.g. air-gapped environments), download the binary from <https://dl.min.io> and replace the existing MinIO binary (for example `/opt/bin/minio`), apply executable permissions with `chmod +x /opt/bin/minio`, and run `mc admin service restart alias/`, as sketched below.
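
A hedged sketch of that sequence, assuming the binary lives at `/opt/bin/minio` (as in the example above) and the deployment is registered under the illustrative alias `myminio`:

```sh
# Fetch the new binary out-of-band, then copy it into place on each node
cp ./minio /opt/bin/minio
chmod +x /opt/bin/minio

# Restart all nodes of the deployment in one step
mc admin service restart myminio/
```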

## Important things to remember during MinIO upgrades

- For RPM/DEB installations, upgrade packages **in parallel** on all servers. Once upgraded, run `systemctl restart minio` across all nodes **in parallel**. RPM/DEB-based installations are usually automated using [`ansible`](https://github.com/minio/ansible-minio).
- `mc admin update` will only work if the user running MinIO has write access to the parent directory of the binary; for example, if the current binary is at `/usr/local/bin/minio`, you need write access to `/usr/local/bin`.
- `mc admin update` updates and restarts all servers simultaneously; applications retry and continue their respective operations after the upgrade.
- `mc admin update` is disabled in Kubernetes/container environments; container environments provide their own mechanisms to roll out updates.
- In federated setups, run `mc admin update` against each cluster individually. Avoid updating `mc` to any new release until all clusters have been successfully updated.
- If using `kes` as KMS with MinIO, just replace the binary and restart `kes`; more information about `kes` can be found [here](https://github.com/minio/kes/wiki).
- If using Vault as KMS with MinIO, ensure you have followed the Vault upgrade procedure outlined here: <https://www.vaultproject.io/docs/upgrading/index.html>
- If using etcd with MinIO for federation, ensure you have followed the etcd upgrade procedure outlined here: <https://github.com/etcd-io/etcd/blob/master/Documentation/upgrades/upgrading-etcd.md>

### Upgrade Checklist

- Test all upgrades in a lower environment (DEV, QA, UAT) before applying to production. Performing blind upgrades in production environments carries significant risk.
- Read the release notes for the targeted MinIO release *before* performing any installation. There is no requirement to upgrade to the latest release every week; if a release has a bug fix you need, upgrade, otherwise avoid actively upgrading a running production system.
- Make sure the MinIO process has write access to `/opt/bin` if you plan to use `mc admin update`. This is needed for MinIO to download the latest binary from <https://dl.min.io> and save it locally for upgrades.
- `mc admin update` is not supported in Kubernetes/container environments; container environments provide their own mechanisms for container updates.
- **We do not recommend upgrading one MinIO server at a time; the product is designed to support parallel upgrades, so please follow our recommended guidelines.**

## Explore Further

- [MinIO Erasure Code QuickStart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide)
- [Use `mc` with MinIO Server](https://docs.min.io/docs/minio-client-quickstart-guide)
- [Use `aws-cli` with MinIO Server](https://docs.min.io/docs/aws-cli-with-minio)
@@ -253,9 +263,12 @@ mc admin update <minio alias, e.g., myminio>
- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/docs/golang-client-quickstart-guide)
- [The MinIO documentation website](https://docs.min.io)

## Contribute to MinIO Project

Please follow the MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md).

## License

- MinIO source is licensed under the GNU AGPLv3 license that can be found in the [LICENSE](https://github.com/minio/minio/blob/master/LICENSE) file.
- MinIO [Documentation](https://github.com/minio/minio/tree/master/docs) © 2021 by MinIO, Inc is licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/).
- [License Compliance](https://github.com/minio/minio/blob/master/COMPLIANCE.md)

@@ -18,9 +18,10 @@ you need access credentials for a successful exploit).

If you have not received a reply to your email within 48 hours, or you have not heard from the security team for the past five days, please contact the security team directly:

- Primary security coordinator: aead@min.io
- Secondary coordinator: harsha@min.io
- If you receive no response: dev@min.io

### Disclosure Process

@@ -32,7 +33,7 @@ MinIO uses the following disclosure process:

   If the report is rejected, the response explains why.
3. Code is audited to find any potential similar problems.
4. Fixes are prepared for the latest release.
5. On the date that the fixes are applied, a security advisory will be published on <https://blog.min.io>.

Please inform us in your report email whether MinIO should mention your contribution w.r.t. fixing the security issue. By default MinIO will **not** publish this information to protect your privacy.

@@ -1,11 +1,11 @@

# Vulnerability Management Policy

This document formally describes the process of addressing and managing a reported vulnerability that has been found in the MinIO server code base, any directly connected ecosystem component, or a direct / indirect dependency of the code base.

## Scope

The vulnerability management policy described in this document covers the process of investigating, assessing and resolving a vulnerability report
@@ -14,13 +14,13 @@ opened by a MinIO employee or an external third party.
Therefore, it lists pre-conditions and actions that should be performed to resolve and fix a reported vulnerability.

## Vulnerability Management Process

The vulnerability management process requires that the vulnerability report contains the following information:

- The project / component that contains the reported vulnerability.
- A description of the vulnerability. In particular, the type of the reported vulnerability and how it might be exploited. Alternatively, a well-established vulnerability identifier, e.g. a CVE number, can be used instead.

@@ -28,12 +28,11 @@ contains the following information:

Based on the description mentioned above, a MinIO engineer or security team member investigates:

- Whether the reported vulnerability exists.
- The conditions that are required such that the vulnerability can be exploited.
- The steps required to fix the vulnerability.

In general, if the vulnerability exists in one of the MinIO code bases itself - not in a code dependency - then MinIO will, if possible, fix the vulnerability or implement reasonable countermeasures such that the vulnerability cannot be exploited anymore.

@@ -1,18 +0,0 @@
**/*.swp
cover.out
*~
minio
!*/
site/
**/*.test
**/*.sublime-workspace
/.idea/
/Minio.iml
**/access.log
build
vendor/**/*.js
vendor/**/*.json
.DS_Store
*.syso
coverage.txt
node_modules
buildscripts/minio-upgrade.sh (new file, 92 lines)
@@ -0,0 +1,92 @@
#!/bin/bash

trap 'cleanup $LINENO' ERR

# shellcheck disable=SC2120
cleanup() {
    MINIO_VERSION=dev docker-compose \
        -f "buildscripts/upgrade-tests/compose.yml" \
        rm -s -f
    docker volume prune -f
}

verify_checksum_after_heal() {
    local sum1
    sum1=$(curl -s "$2" | sha256sum);
    mc admin heal --json -r "$1" >/dev/null; # test after healing
    local sum1_heal
    sum1_heal=$(curl -s "$2" | sha256sum);

    if [ "${sum1_heal}" != "${sum1}" ]; then
        echo "mismatch expected ${sum1_heal}, got ${sum1}"
        exit 1;
    fi
}

verify_checksum_mc() {
    local expected
    expected=$(mc cat "$1" | sha256sum)
    local got
    got=$(mc cat "$2" | sha256sum)

    if [ "${expected}" != "${got}" ]; then
        echo "mismatch - expected ${expected}, got ${got}"
        exit 1;
    fi
    echo "matches - ${expected}, got ${got}"
}

add_alias() {
    for i in $(seq 1 4); do
        echo "... attempting to add alias $i"
        until (mc alias set minio http://127.0.0.1:9000 minioadmin minioadmin); do
            echo "...waiting... for 5secs" && sleep 5
        done
    done

    echo "Sleeping for nginx"
    sleep 20
}

__init__() {
    sudo apt install curl -y
    export GOPATH=/tmp/gopath
    export PATH=${PATH}:${GOPATH}/bin

    go install github.com/minio/mc@latest

    TAG=minio/minio:dev make docker

    MINIO_VERSION=RELEASE.2019-12-19T22-52-26Z docker-compose \
        -f "buildscripts/upgrade-tests/compose.yml" \
        up -d --build

    add_alias

    mc mb minio/minio-test/
    mc cp ./minio minio/minio-test/to-read/
    mc cp /etc/hosts minio/minio-test/to-read/hosts
    mc policy set download minio/minio-test

    verify_checksum_mc ./minio minio/minio-test/to-read/minio

    curl -s http://127.0.0.1:9000/minio-test/to-read/hosts | sha256sum

    MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
}

main() {
    MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build

    add_alias

    verify_checksum_after_heal minio/minio-test http://127.0.0.1:9000/minio-test/to-read/hosts

    verify_checksum_mc ./minio minio/minio-test/to-read/minio

    verify_checksum_mc /etc/hosts minio/minio-test/to-read/hosts

    cleanup
}

( __init__ "$@" && main "$@" )
@@ -2,6 +2,7 @@

set -e

## TODO remove `dsync` from race detector once this is merged and released https://go-review.googlesource.com/c/go/+/333529/
for d in $(go list ./... | grep -v dsync); do
    CGO_ENABLED=1 go test -v -race --timeout 100m "$d"
done

buildscripts/unaligned-healing.sh (new executable file, 172 lines)
@@ -0,0 +1,172 @@
#!/bin/bash -e
#

set -E
set -o pipefail
set -x

if [ ! -x "$PWD/minio" ]; then
    echo "minio executable binary not found in current directory"
    exit 1
fi

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO_OLD=( "$PWD/minio.RELEASE.2021-11-24T23-19-33Z" --config-dir "$MINIO_CONFIG_DIR" server )
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )

function download_old_release() {
    if [ ! -f minio.RELEASE.2021-11-24T23-19-33Z ]; then
        curl --silent -O https://dl.minio.io/server/minio/release/linux-amd64/archive/minio.RELEASE.2021-11-24T23-19-33Z
        chmod a+x minio.RELEASE.2021-11-24T23-19-33Z
    fi
}

function start_minio_16drive() {
    start_port=$1

    export MINIO_ROOT_USER=minio
    export MINIO_ROOT_PASSWORD=minio123
    export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
    unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
    export _MINIO_SHARD_DISKTIME_DELTA="5s" # do not change this as its needed for tests
    export MINIO_CI_CD=1

    MC_BUILD_DIR="mc-$RANDOM"
    if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
        echo "failed to download https://github.com/minio/mc"
        purge "${MC_BUILD_DIR}"
        exit 1
    fi

    (cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")

    # remove mc source.
    purge "${MC_BUILD_DIR}"

    "${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" > "${WORK_DIR}/server1.log" 2>&1 &
    pid=$!
    disown $pid
    sleep 30

    if ! ps -p ${pid} 1>&2 >/dev/null; then
        echo "server1 log:"
        cat "${WORK_DIR}/server1.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    shred --iterations=1 --size=5241856 - 1>"${WORK_DIR}/unaligned" 2>/dev/null
    "${WORK_DIR}/mc" mb minio/healing-shard-bucket --quiet
    "${WORK_DIR}/mc" cp \
        "${WORK_DIR}/unaligned" \
        minio/healing-shard-bucket/unaligned \
        --disable-multipart --quiet

    ## "unaligned" object name gets consistently distributed
    ## to disks in following distribution order
    ##
    ## NOTE: if you change the name make sure to change the
    ## distribution order present here
    ##
    ## [15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]

    ## make sure to remove the "last" data shard
    rm -rf "${WORK_DIR}/xl14/healing-shard-bucket/unaligned"
    sleep 10
    ## Heal the shard
    "${WORK_DIR}/mc" admin heal --quiet --recursive minio/healing-shard-bucket
    ## then remove any other data shard let's pick first disk
    ## - 1st data shard.
    rm -rf "${WORK_DIR}/xl3/healing-shard-bucket/unaligned"
    sleep 10

    go build ./docs/debugging/s3-check-md5/
    if ! ./s3-check-md5 \
        -debug \
        -access-key minio \
        -secret-key minio123 \
        -endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep CORRUPTED; then
        echo "server1 log:"
        cat "${WORK_DIR}/server1.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    pkill minio
    sleep 3

    "${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" > "${WORK_DIR}/server1.log" 2>&1 &
    pid=$!
    disown $pid
    sleep 30

    if ! ps -p ${pid} 1>&2 >/dev/null; then
        echo "server1 log:"
        cat "${WORK_DIR}/server1.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    if ! ./s3-check-md5 \
        -debug \
        -access-key minio \
        -secret-key minio123 \
        -endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
        echo "server1 log:"
        cat "${WORK_DIR}/server1.log"
        echo "FAILED"
        mkdir -p inspects
        (cd inspects; "${WORK_DIR}/mc" admin inspect minio/healing-shard-bucket/unaligned/**)

        "${WORK_DIR}/mc" mb play/inspects
        "${WORK_DIR}/mc" mirror inspects play/inspects

        purge "$WORK_DIR"
        exit 1
    fi

    "${WORK_DIR}/mc" admin heal --quiet --recursive minio/healing-shard-bucket

    if ! ./s3-check-md5 \
        -debug \
        -access-key minio \
        -secret-key minio123 \
        -endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
        echo "server1 log:"
        cat "${WORK_DIR}/server1.log"
        echo "FAILED"
        mkdir -p inspects
        (cd inspects; "${WORK_DIR}/mc" admin inspect minio/healing-shard-bucket/unaligned/**)

        "${WORK_DIR}/mc" mb play/inspects
        "${WORK_DIR}/mc" mirror inspects play/inspects

        purge "$WORK_DIR"
        exit 1
    fi

    pkill minio
    sleep 3
}

function main() {
    download_old_release

    start_port=$(shuf -i 10000-65000 -n 1)

    start_minio_16drive ${start_port}
}

function purge()
{
    rm -rf "$1"
}

( main "$@" )
rv=$?
purge "$WORK_DIR"
exit "$rv"
buildscripts/upgrade-tests/compose.yml (new file, 81 lines)
@@ -0,0 +1,81 @@
version: '3.7'

# Settings and configurations that are common for all containers
x-minio-common: &minio-common
  image: minio/minio:${MINIO_VERSION}
  command: server http://minio{1...4}/data{1...3}
  env_file:
    - ./minio.env
  expose:
    - "9000"
    - "9001"
  healthcheck:
    test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
    interval: 30s
    timeout: 20s
    retries: 3

# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
  minio1:
    <<: *minio-common
    hostname: minio1
    volumes:
      - data1-1:/data1
      - data1-2:/data2
      - data1-3:/data3

  minio2:
    <<: *minio-common
    hostname: minio2
    volumes:
      - data2-1:/data1
      - data2-2:/data2
      - data2-3:/data3

  minio3:
    <<: *minio-common
    hostname: minio3
    volumes:
      - data3-1:/data1
      - data3-2:/data2
      - data3-3:/data3

  minio4:
    <<: *minio-common
    hostname: minio4
    volumes:
      - data4-1:/data1
      - data4-2:/data2
      - data4-3:/data3

  nginx:
    image: nginx:1.19.2-alpine
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf:ro
    ports:
      - "9000:9000"
      - "9001:9001"
    depends_on:
      - minio1
      - minio2
      - minio3
      - minio4

## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
  data1-1:
  data1-2:
  data1-3:
  data2-1:
  data2-2:
  data2-3:
  data3-1:
  data3-2:
  data3-3:
  data4-1:
  data4-2:
  data4-3:
buildscripts/upgrade-tests/minio.env (new file, 3 lines)
@@ -0,0 +1,3 @@
MINIO_ACCESS_KEY=minioadmin
MINIO_SECRET_KEY=minioadmin
MINIO_BROWSER=off
buildscripts/upgrade-tests/nginx.conf (new file, 68 lines)
@@ -0,0 +1,68 @@
user  nginx;
worker_processes  auto;

error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;


events {
    worker_connections  1024;
}


http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    keepalive_timeout  65;

    #gzip  on;

    # include /etc/nginx/conf.d/*.conf;

    upstream minio {
        server minio1:9000;
        server minio2:9000;
        server minio3:9000;
        server minio4:9000;
    }

    # main minio
    server {
        listen       9000;
        listen  [::]:9000;
        server_name  localhost;

        # To allow special characters in headers
        ignore_invalid_headers off;
        # Allow any size file to be uploaded.
        # Set to a value such as 1000m; to restrict file size to a specific value
        client_max_body_size 0;
        # To disable buffering
        proxy_buffering off;

        location / {
            proxy_set_header Host $http_host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;

            proxy_connect_timeout 300;
            # Default is HTTP/1, keepalive is only enabled in HTTP/1.1
            proxy_http_version 1.1;
            proxy_set_header Connection "";
            chunked_transfer_encoding off;

            proxy_pass http://minio;
        }
    }
}
@@ -20,6 +20,9 @@ export SECRET_KEY="minio123"
export ENABLE_HTTPS=0
export GO111MODULE=on
export GOGC=25
export ENABLE_ADMIN=1

export MINIO_CI_CD=1

MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" )
@@ -65,7 +68,7 @@ function start_minio_pool_erasure_sets_ipv6()
{
    export MINIO_ROOT_USER=$ACCESS_KEY
    export MINIO_ROOT_PASSWORD=$SECRET_KEY
    export MINIO_ENDPOINTS="http://[::1]:9000${WORK_DIR}/pool-disk-sets{1...4} http://[::1]:9001${WORK_DIR}/pool-disk-sets{5...8}"
    export MINIO_ENDPOINTS="http://[::1]:9000${WORK_DIR}/pool-disk-sets-ipv6{1...4} http://[::1]:9001${WORK_DIR}/pool-disk-sets-ipv6{5...8}"
    "${MINIO[@]}" server --address="[::1]:9000" > "$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
    "${MINIO[@]}" server --address="[::1]:9001" > "$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &

@@ -17,55 +17,75 @@ function start_minio_3_node() {
    export MINIO_ROOT_USER=minio
    export MINIO_ROOT_PASSWORD=minio123
    export MINIO_ERASURE_SET_DRIVE_COUNT=6
    export MINIO_CI_CD=1

    start_port=$2
    args=""
    for i in $(seq 1 3); do
        args="$args http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/1/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/2/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/3/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/4/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/5/ http://127.0.0.1:$[$start_port+$i]${WORK_DIR}/$i/6/"
    done

    "${MINIO[@]}" --address ":$[$start_port+1]" $args > "${WORK_DIR}/dist-minio-server1.log" 2>&1 &
    pid1=$!
    disown ${pid1}

    "${MINIO[@]}" --address ":$[$start_port+2]" $args > "${WORK_DIR}/dist-minio-server2.log" 2>&1 &
    pid2=$!
    disown $pid2

    "${MINIO[@]}" --address ":$[$start_port+3]" $args > "${WORK_DIR}/dist-minio-server3.log" 2>&1 &
    pid3=$!
    disown $pid3

    sleep "$1"

    if ! ps -p $pid1 1>&2 > /dev/null; then
        echo "server1 log:"
        cat "${WORK_DIR}/dist-minio-server1.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    if ! ps -p $pid2 1>&2 > /dev/null; then
        echo "server2 log:"
        cat "${WORK_DIR}/dist-minio-server2.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    if ! ps -p $pid3 1>&2 > /dev/null; then
        echo "server3 log:"
        cat "${WORK_DIR}/dist-minio-server3.log"
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    if ! pkill minio; then
        for i in $(seq 1 3); do
            echo "server$i log:"
            cat "${WORK_DIR}/dist-minio-server$i.log"
        done
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi

    sleep 1;
    if pgrep minio; then
        # forcibly killing, to proceed further properly.
        if ! pkill -9 minio; then
            echo "no minio process running anymore, proceed."
        fi
    fi
}


function check_online() {
    if grep -q 'Unable to initialize sub-systems' ${WORK_DIR}/dist-minio-*.log; then
        echo "1"
    fi
}

@@ -85,33 +105,36 @@ function __init__()
}

function perform_test() {
    start_minio_3_node 120 $2

    echo "Testing Distributed Erasure setup healing of drives"
    echo "Remove the contents of the disks belonging to '${1}' erasure set"

    rm -rf ${WORK_DIR}/${1}/*/

    start_minio_3_node 120 $2

    rv=$(check_online)
    if [ "$rv" == "1" ]; then
        for i in $(seq 1 3); do
            echo "server$i log:"
            cat "${WORK_DIR}/dist-minio-server$i.log"
        done
        pkill -9 minio
        echo "FAILED"
        purge "$WORK_DIR"
        exit 1
    fi
}

function main()
{
    # use same ports for all tests
    start_port=$(shuf -i 10000-65000 -n 1)

    perform_test "2" ${start_port}
    perform_test "1" ${start_port}
    perform_test "3" ${start_port}
}

( __init__ "$@" && main "$@" )

@@ -114,8 +114,6 @@ func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.
		writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
		return
	}

	w.(http.Flusher).Flush()
}

// GetBucketACLHandler - GET Bucket ACL
@@ -164,8 +162,6 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	w.(http.Flusher).Flush()
}

// PutObjectACLHandler - PUT Object ACL
@@ -229,8 +225,6 @@ func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.
		writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
		return
	}

	w.(http.Flusher).Flush()
}

// GetObjectACLHandler - GET Object ACL
@@ -283,6 +277,4 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	w.(http.Flusher).Flush()
}

@@ -65,12 +65,33 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
		return
	}

	if _, err = parseBucketQuota(bucket, data); err != nil {
	quotaConfig, err := parseBucketQuota(bucket, data)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	if err = globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, data); err != nil {
	if quotaConfig.Type == "fifo" {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}

	if err = globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, data); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	bucketMeta := madmin.SRBucketMeta{
		Type:   madmin.SRBucketMetaTypeQuotaConfig,
		Bucket: bucket,
		Quota:  data,
	}
	if quotaConfig.Quota == 0 {
		bucketMeta.Quota = nil
	}

	// Call site replication hook.
	if err = globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
@@ -99,7 +120,7 @@ func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *
		return
	}

	config, err := globalBucketMetadataSys.GetQuotaConfig(bucket)
	config, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
@@ -155,7 +176,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
		return
	}
	var target madmin.BucketTarget
	var json = jsoniter.ConfigCompatibleWithStandardLibrary
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	if err = json.Unmarshal(reqBytes, &target); err != nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
		return
@@ -230,7 +251,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
		return
	}
	if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
	if err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
@@ -324,7 +345,7 @@ func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *ht
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
		return
	}
	if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
	if err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

@@ -29,7 +29,7 @@ import (
	iampolicy "github.com/minio/pkg/iam/policy"
)

func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, action iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, actions ...iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
	// Get current object layer instance.
	objectAPI := newObjectLayerFn()
	if objectAPI == nil || globalNotificationSys == nil {
@@ -37,14 +37,17 @@ func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Reques
		return nil, auth.Credentials{}
	}

	// Validate request signature.
	cred, adminAPIErr := checkAdminRequestAuth(ctx, r, action, "")
	if adminAPIErr != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
		return nil, cred
	for _, action := range actions {
		// Validate request signature.
		cred, adminAPIErr := checkAdminRequestAuth(ctx, r, action, "")
		if adminAPIErr != ErrNone {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
			return nil, cred
		}
		return objectAPI, cred
	}

	return objectAPI, cred
	writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
	return nil, auth.Credentials{}
}

// AdminError - is a generic error for all admin APIs.
@@ -83,8 +86,28 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
			Description:    e.Message,
			HTTPStatusCode: e.StatusCode,
		}
	case SRError:
		apiErr = errorCodes.ToAPIErrWithErr(e.Code, e.Cause)
	case decomError:
		apiErr = APIError{
			Code:           "XMinioDecommissionNotAllowed",
			Description:    e.Err,
			HTTPStatusCode: http.StatusBadRequest,
		}
	default:
		switch {
		case errors.Is(err, errDecommissionAlreadyRunning):
			apiErr = APIError{
				Code:           "XMinioDecommissionNotAllowed",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case errors.Is(err, errDecommissionComplete):
			apiErr = APIError{
				Code:           "XMinioDecommissionNotAllowed",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case errors.Is(err, errConfigNotFound):
			apiErr = APIError{
				Code: "XMinioConfigError",
@@ -97,12 +120,30 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
				Description:    err.Error(),
				HTTPStatusCode: http.StatusForbidden,
			}
		case errors.Is(err, errIAMServiceAccount):
			apiErr = APIError{
				Code:           "XMinioIAMServiceAccount",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case errors.Is(err, errIAMServiceAccountUsed):
			apiErr = APIError{
				Code:           "XMinioIAMServiceAccountUsed",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case errors.Is(err, errIAMNotInitialized):
			apiErr = APIError{
				Code:           "XMinioIAMNotInitialized",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusServiceUnavailable,
			}
		case errors.Is(err, errPolicyInUse):
			apiErr = APIError{
				Code:           "XMinioAdminPolicyInUse",
				Description:    "The policy cannot be removed, as it is in use",
				HTTPStatusCode: http.StatusBadRequest,
			}
		case errors.Is(err, kes.ErrKeyExists):
			apiErr = APIError{
				Code: "XMinioKMSKeyExists",
@@ -141,6 +182,12 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
				Description:    err.Error(),
				HTTPStatusCode: http.StatusConflict,
			}
		case errors.Is(err, errTierBackendNotEmpty):
			apiErr = APIError{
				Code:           "XMinioAdminTierBackendNotEmpty",
				Description:    err.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case errors.Is(err, errTierInsufficientCreds):
			apiErr = APIError{
				Code: "XMinioAdminTierInsufficientCreds",

@@ -19,6 +19,7 @@ package cmd

import (
	"bytes"
	"context"
	"encoding/json"
	"io"
	"net/http"
@@ -63,6 +64,12 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
		return
	}

	subSys, _, _, err := config.GetSubSys(string(kvBytes))
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	cfg, err := readServerConfig(ctx, objectAPI)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -73,11 +80,32 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	if err = validateConfig(cfg, subSys); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}

	if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	dynamic := config.SubSystemsDynamic.Contains(subSys)
	if dynamic {
		applyDynamic(ctx, objectAPI, cfg, subSys, r, w)
	}
}

func applyDynamic(ctx context.Context, objectAPI ObjectLayer, cfg config.Config, subSys string,
	r *http.Request, w http.ResponseWriter) {
	// Apply dynamic values.
	if err := applyDynamicConfigForSubSys(GlobalContext, objectAPI, cfg, subSys); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	globalNotificationSys.SignalService(serviceReloadDynamic)
	// Tell the client that dynamic config was applied.
	w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
}

// SetConfigKVHandler - PUT /minio/admin/v3/set-config-kv
@@ -117,7 +145,13 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
		return
	}

	if err = validateConfig(cfg); err != nil {
	subSys, _, _, err := config.GetSubSys(string(kvBytes))
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err = validateConfig(cfg, subSys); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}
@@ -135,14 +169,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
	}

	if dynamic {
		// Apply dynamic values.
		if err := applyDynamicConfig(GlobalContext, objectAPI, cfg); err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		globalNotificationSys.SignalService(serviceReloadDynamic)
		// If all values were dynamic, tell the client.
		w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
		applyDynamic(ctx, objectAPI, cfg, subSys, r, w)
	}
	writeSuccessResponseHeadersOnly(w)
}
@@ -160,7 +187,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ

	cfg := globalServerConfig.Clone()
	vars := mux.Vars(r)
	var buf = &bytes.Buffer{}
	buf := &bytes.Buffer{}
	cw := config.NewConfigWriteTo(cfg, vars["key"])
	if _, err := cw.WriteTo(buf); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -205,11 +232,9 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *
			return
		}
	}
	} else {
		if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	} else if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

@@ -248,7 +273,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
		return
	}

	if err = validateConfig(cfg); err != nil {
	if err = validateConfig(cfg, ""); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}
@@ -326,7 +351,6 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req
	}

	json.NewEncoder(w).Encode(rd)
	w.(http.Flusher).Flush()
}

// SetConfigHandler - PUT /minio/admin/v3/config
@@ -360,7 +384,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
		return
	}

	if err = validateConfig(cfg); err != nil {
	if err = validateConfig(cfg, ""); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}

179
cmd/admin-handlers-pools.go
Normal file
179
cmd/admin-handlers-pools.go
Normal file
@@ -0,0 +1,179 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
iampolicy "github.com/minio/pkg/iam/policy"
|
||||
)
|
||||
|
||||
func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "StartDecommission")
|
||||
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DecommissionAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Legacy args style such as non-ellipses style is not supported with this API.
|
||||
if globalEndpoints.Legacy() {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
pools, ok := objectAPI.(*erasureServerPools)
|
||||
if !ok {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
v := vars["pool"]
|
||||
|
||||
idx := globalEndpoints.GetPoolIdx(v)
|
||||
if idx == -1 {
|
||||
		// We didn't find any matching pools, invalid input
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
		return
	}

	if err := pools.Decommission(r.Context(), idx); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "CancelDecommission")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DecommissionAdminAction)
	if objectAPI == nil {
		return
	}

	// Legacy args style such as non-ellipses style is not supported with this API.
	if globalEndpoints.Legacy() {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	pools, ok := objectAPI.(*erasureServerPools)
	if !ok {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	vars := mux.Vars(r)
	v := vars["pool"]

	idx := globalEndpoints.GetPoolIdx(v)
	if idx == -1 {
		// We didn't find any matching pools, invalid input
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
		return
	}

	if err := pools.DecommissionCancel(ctx, idx); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "StatusPool")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerInfoAdminAction, iampolicy.DecommissionAdminAction)
	if objectAPI == nil {
		return
	}

	// Legacy args style such as non-ellipses style is not supported with this API.
	if globalEndpoints.Legacy() {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	pools, ok := objectAPI.(*erasureServerPools)
	if !ok {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	vars := mux.Vars(r)
	v := vars["pool"]

	idx := globalEndpoints.GetPoolIdx(v)
	if idx == -1 {
		// We didn't find any matching pools, invalid input
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
		return
	}

	status, err := pools.Status(r.Context(), idx)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	logger.LogIf(r.Context(), json.NewEncoder(w).Encode(&status))
}

func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListPools")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerInfoAdminAction, iampolicy.DecommissionAdminAction)
	if objectAPI == nil {
		return
	}

	// Legacy args style such as non-ellipses style is not supported with this API.
	if globalEndpoints.Legacy() {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	pools, ok := objectAPI.(*erasureServerPools)
	if !ok {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	poolsStatus := make([]PoolStatus, len(globalEndpoints))
	for idx := range globalEndpoints {
		status, err := pools.Status(r.Context(), idx)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		poolsStatus[idx] = status
	}

	logger.LogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus))
}
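The pool handlers above (Decommission, CancelDecommission, StatusPool) all repeat the same lookup preamble: resolve the `{pool}` route variable to a pool index and reject unknown pools. A minimal sketch of how that shared step could be factored out, reusing only names visible in this diff; `resolvePoolIndex` itself is a hypothetical helper, not part of this changeset:

```go
// resolvePoolIndex is a hypothetical helper (not in this changeset): it maps
// the {pool} route variable to a pool index via globalEndpoints.GetPoolIdx
// and writes the invalid-argument response when no pool matches.
func resolvePoolIndex(ctx context.Context, w http.ResponseWriter, r *http.Request) (int, bool) {
	idx := globalEndpoints.GetPoolIdx(mux.Vars(r)["pool"])
	if idx == -1 {
		// No matching pool for the given input.
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
		return -1, false
	}
	return idx, true
}
```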
@@ -45,16 +45,15 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
	}

	var sites []madmin.PeerSite
	errCode := readJSONBody(ctx, r.Body, &sites, cred.SecretKey)
	if errCode != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
	if err := parseJSONBody(ctx, r.Body, &sites, cred.SecretKey); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	status, errInfo := globalSiteReplicationSys.AddPeerClusters(ctx, sites)
	if errInfo.Code != ErrNone {
		logger.LogIf(ctx, errInfo)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(errInfo.Code, errInfo.Cause), r.URL)
	status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

@@ -67,12 +66,12 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
	writeSuccessResponseJSON(w, body)
}

// SRInternalJoin - PUT /minio/admin/v3/site-replication/join
// SRPeerJoin - PUT /minio/admin/v3/site-replication/join
//
// used internally to tell current cluster to enable SR with
// the provided peer clusters and service account.
func (a adminAPIHandlers) SRInternalJoin(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRInternalJoin")
func (a adminAPIHandlers) SRPeerJoin(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerJoin")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

@@ -81,24 +80,22 @@ func (a adminAPIHandlers) SRInternalJoin(w http.ResponseWriter, r *http.Request)
		return
	}

	var joinArg madmin.SRInternalJoinReq
	errCode := readJSONBody(ctx, r.Body, &joinArg, cred.SecretKey)
	if errCode != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
	var joinArg madmin.SRPeerJoinReq
	if err := parseJSONBody(ctx, r.Body, &joinArg, cred.SecretKey); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	errInfo := globalSiteReplicationSys.InternalJoinReq(ctx, joinArg)
	if errInfo.Code != ErrNone {
		logger.LogIf(ctx, errInfo)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(errInfo.Code, errInfo.Cause), r.URL)
	if err := globalSiteReplicationSys.PeerJoinReq(ctx, joinArg); err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

// SRInternalBucketOps - PUT /minio/admin/v3/site-replication/bucket-ops?bucket=x&operation=y
func (a adminAPIHandlers) SRInternalBucketOps(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRInternalBucketOps")
// SRPeerBucketOps - PUT /minio/admin/v3/site-replication/bucket-ops?bucket=x&operation=y
func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerBucketOps")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

@@ -113,6 +110,8 @@ func (a adminAPIHandlers) SRInternalBucketOps(w http.ResponseWriter, r *http.Req

	var err error
	switch operation {
	default:
		err = errSRInvalidRequest(errInvalidArgument)
	case madmin.MakeWithVersioningBktOp:
		_, isLockEnabled := r.Form["lockEnabled"]
		_, isVersioningEnabled := r.Form["versioningEnabled"]
@@ -128,21 +127,17 @@ func (a adminAPIHandlers) SRInternalBucketOps(w http.ResponseWriter, r *http.Req
		err = globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, false)
	case madmin.ForceDeleteBucketBktOp:
		err = globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, true)
	default:
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
		return
	}
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

}

// SRInternalReplicateIAMItem - PUT /minio/admin/v3/site-replication/iam-item
func (a adminAPIHandlers) SRInternalReplicateIAMItem(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRInternalReplicateIAMItem")
// SRPeerReplicateIAMItem - PUT /minio/admin/v3/site-replication/iam-item
func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerReplicateIAMItem")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

@@ -152,45 +147,51 @@ func (a adminAPIHandlers) SRInternalReplicateIAMItem(w http.ResponseWriter, r *h
	}

	var item madmin.SRIAMItem
	errCode := readJSONBody(ctx, r.Body, &item, "")
	if errCode != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
	if err := parseJSONBody(ctx, r.Body, &item, ""); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	var err error
	switch item.Type {
	default:
		err = errSRInvalidRequest(errInvalidArgument)
	case madmin.SRIAMItemPolicy:
		var policy *iampolicy.Policy
		if len(item.Policy) > 0 {
			policy, err = iampolicy.ParseConfig(bytes.NewReader(item.Policy))
			if err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		if item.Policy == nil {
			err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, nil)
		} else {
			policy, perr := iampolicy.ParseConfig(bytes.NewReader(item.Policy))
			if perr != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, perr), r.URL)
				return
			}
			if policy.IsEmpty() {
				err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, nil)
			} else {
				err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, policy)
			}
		}
		err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, policy)
	case madmin.SRIAMItemSvcAcc:
		err = globalSiteReplicationSys.PeerSvcAccChangeHandler(ctx, *item.SvcAccChange)
		err = globalSiteReplicationSys.PeerSvcAccChangeHandler(ctx, item.SvcAccChange)
	case madmin.SRIAMItemPolicyMapping:
		err = globalSiteReplicationSys.PeerPolicyMappingHandler(ctx, *item.PolicyMapping)
		err = globalSiteReplicationSys.PeerPolicyMappingHandler(ctx, item.PolicyMapping)
	case madmin.SRIAMItemSTSAcc:
		err = globalSiteReplicationSys.PeerSTSAccHandler(ctx, *item.STSCredential)

	default:
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
		return
		err = globalSiteReplicationSys.PeerSTSAccHandler(ctx, item.STSCredential)
	case madmin.SRIAMItemIAMUser:
		err = globalSiteReplicationSys.PeerIAMUserChangeHandler(ctx, item.IAMUser)
	case madmin.SRIAMItemGroupInfo:
		err = globalSiteReplicationSys.PeerGroupInfoChangeHandler(ctx, item.GroupInfo)
	}
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}
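The dispatch above keys purely off `item.Type`. A sketch of the kind of payload a peer would send, using only the `madmin.SRIAMItem` fields visible in this diff (`Type`, `Name`, `Policy`); the policy document itself is a made-up example:

```go
// Illustrative only: build the kind of SRIAMItem payload this handler
// dispatches on. Field names are taken from the diff above.
item := madmin.SRIAMItem{
	Type:   madmin.SRIAMItemPolicy,
	Name:   "readonly-example",
	Policy: []byte(`{"Version":"2012-10-17","Statement":[]}`),
}
// A nil Policy maps to PeerAddPolicyHandler(ctx, item.Name, nil), i.e. a
// policy deletion on the peer; a non-empty one is parsed and applied.
```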
// SRInternalReplicateBucketItem - PUT /minio/admin/v3/site-replication/bucket-meta
func (a adminAPIHandlers) SRInternalReplicateBucketItem(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRInternalReplicateIAMItem")
// SRPeerReplicateBucketItem - PUT /minio/admin/v3/site-replication/bucket-meta
func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerReplicateBucketItem")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

@@ -200,50 +201,54 @@ func (a adminAPIHandlers) SRInternalReplicateBucketItem(w http.ResponseWriter, r
	}

	var item madmin.SRBucketMeta
	errCode := readJSONBody(ctx, r.Body, &item, "")
	if errCode != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
	if err := parseJSONBody(ctx, r.Body, &item, ""); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	var err error
	switch item.Type {
	default:
		err = errSRInvalidRequest(errInvalidArgument)
	case madmin.SRBucketMetaTypePolicy:
		var bktPolicy *policy.Policy
		if len(item.Policy) > 0 {
			bktPolicy, err = policy.ParseConfig(bytes.NewReader(item.Policy), item.Bucket)
		if item.Policy == nil {
			err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, nil)
		} else {
			bktPolicy, berr := policy.ParseConfig(bytes.NewReader(item.Policy), item.Bucket)
			if berr != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, berr), r.URL)
				return
			}
			if bktPolicy.IsEmpty() {
				err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, nil)
			} else {
				err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, bktPolicy)
			}
		}
	case madmin.SRBucketMetaTypeQuotaConfig:
		if item.Quota == nil {
			err = globalSiteReplicationSys.PeerBucketQuotaConfigHandler(ctx, item.Bucket, nil)
		} else {
			quotaConfig, err := parseBucketQuota(item.Bucket, item.Quota)
			if err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
				return
			}
			if err = globalSiteReplicationSys.PeerBucketQuotaConfigHandler(ctx, item.Bucket, quotaConfig); err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
		}
		err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, bktPolicy)
	case madmin.SRBucketMetaTypeTags:
		err = globalSiteReplicationSys.PeerBucketTaggingHandler(ctx, item.Bucket, item.Tags)
	case madmin.SRBucketMetaTypeObjectLockConfig:
		err = globalSiteReplicationSys.PeerBucketObjectLockConfigHandler(ctx, item.Bucket, item.ObjectLockConfig)
	case madmin.SRBucketMetaTypeSSEConfig:
		err = globalSiteReplicationSys.PeerBucketSSEConfigHandler(ctx, item.Bucket, item.SSEConfig)

	default:
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
		return
	}
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
		return
	}
}

// SiteReplicationDisable - PUT /minio/admin/v3/site-replication/disable
func (a adminAPIHandlers) SiteReplicationDisable(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationDisable")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationDisableAction)
	if objectAPI == nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}
@@ -269,11 +274,9 @@ func (a adminAPIHandlers) SiteReplicationInfo(w http.ResponseWriter, r *http.Req
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	w.(http.Flusher).Flush()
}

func (a adminAPIHandlers) SRInternalGetIDPSettings(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationGetIDPSettings")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

@@ -288,28 +291,202 @@ func (a adminAPIHandlers) SRInternalGetIDPSettings(w http.ResponseWriter, r *htt
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	w.(http.Flusher).Flush()
}

func readJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) APIErrorCode {
func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) error {
	data, err := ioutil.ReadAll(body)
	if err != nil {
		return ErrInvalidRequest
		return SRError{
			Cause: err,
			Code:  ErrSiteReplicationInvalidRequest,
		}
	}

	if encryptionKey != "" {
		data, err = madmin.DecryptData(encryptionKey, bytes.NewReader(data))
		if err != nil {
			logger.LogIf(ctx, err)
			return ErrInvalidRequest
			return SRError{
				Cause: err,
				Code:  ErrSiteReplicationInvalidRequest,
			}
		}
	}

	err = json.Unmarshal(data, v)
	if err != nil {
		return ErrAdminConfigBadJSON
	}

	return ErrNone
	return json.Unmarshal(data, v)
}

// SiteReplicationStatus - GET /minio/admin/v3/site-replication/status
func (a adminAPIHandlers) SiteReplicationStatus(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationStatus")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationInfoAction)
	if objectAPI == nil {
		return
	}
	opts := getSRStatusOptions(r)
	// default options to all if status options are unset for backward compatibility
	var dfltOpts madmin.SRStatusOptions
	if opts == dfltOpts {
		opts.Buckets = true
		opts.Users = true
		opts.Policies = true
		opts.Groups = true
	}
	info, err := globalSiteReplicationSys.SiteReplicationStatus(ctx, objectAPI, opts)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err = json.NewEncoder(w).Encode(info); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}
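For readers outside this codebase, a self-contained sketch of the read-then-decrypt-then-unmarshal shape that `parseJSONBody` follows, standard library only; the `madmin.DecryptData` step and the `SRError` wrapper are internal to this change, so they are stood in for by a plain callback and `fmt.Errorf`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
)

// parseBody sketches parseJSONBody's shape (not the MinIO implementation):
// read the whole body, optionally decrypt it, then JSON-unmarshal into v.
func parseBody(body io.Reader, v interface{}, decrypt func([]byte) ([]byte, error)) error {
	data, err := io.ReadAll(body)
	if err != nil {
		return fmt.Errorf("read body: %w", err)
	}
	if decrypt != nil { // stands in for madmin.DecryptData(encryptionKey, ...)
		if data, err = decrypt(data); err != nil {
			return fmt.Errorf("decrypt body: %w", err)
		}
	}
	return json.Unmarshal(data, v)
}
```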
// SiteReplicationMetaInfo - GET /minio/admin/v3/site-replication/metainfo
func (a adminAPIHandlers) SiteReplicationMetaInfo(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationMetaInfo")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationInfoAction)
	if objectAPI == nil {
		return
	}

	opts := getSRStatusOptions(r)
	info, err := globalSiteReplicationSys.SiteReplicationMetaInfo(ctx, objectAPI, opts)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err = json.NewEncoder(w).Encode(info); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

// SiteReplicationEdit - PUT /minio/admin/v3/site-replication/edit
func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationEdit")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
	if objectAPI == nil {
		return
	}
	var site madmin.PeerInfo
	err := parseJSONBody(ctx, r.Body, &site, cred.SecretKey)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	body, err := json.Marshal(status)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, body)
}

// SRPeerEdit - PUT /minio/admin/v3/site-replication/peer/edit
//
// used internally to tell current cluster to update endpoint for peer
func (a adminAPIHandlers) SRPeerEdit(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerEdit")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
	if objectAPI == nil {
		return
	}

	var pi madmin.PeerInfo
	if err := parseJSONBody(ctx, r.Body, &pi, ""); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err := globalSiteReplicationSys.PeerEditReq(ctx, pi); err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

func getSRStatusOptions(r *http.Request) (opts madmin.SRStatusOptions) {
	q := r.Form
	opts.Buckets = q.Get("buckets") == "true"
	opts.Policies = q.Get("policies") == "true"
	opts.Groups = q.Get("groups") == "true"
	opts.Users = q.Get("users") == "true"
	opts.Entity = madmin.GetSREntityType(q.Get("entity"))
	opts.EntityValue = q.Get("entityvalue")
	return
}
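A quick illustration of how the query string maps onto these options. `getSRStatusOptions` reads `r.Form`, which the handler framework is assumed to have populated already, so this standalone version calls `ParseForm` explicitly:

```go
// Standalone illustration: "?buckets=true&entityvalue=photos" becomes
// {Buckets: true, EntityValue: "photos"} with all other flags false.
req := httptest.NewRequest(http.MethodGet,
	"/minio/admin/v3/site-replication/status?buckets=true&entityvalue=photos", nil)
_ = req.ParseForm() // getSRStatusOptions reads r.Form, so it must be populated
opts := getSRStatusOptions(req)
fmt.Println(opts.Buckets, opts.EntityValue) // true photos
```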
// SiteReplicationRemove - PUT /minio/admin/v3/site-replication/remove
func (a adminAPIHandlers) SiteReplicationRemove(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SiteReplicationRemove")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationRemoveAction)
	if objectAPI == nil {
		return
	}
	var rreq madmin.SRRemoveReq
	err := parseJSONBody(ctx, r.Body, &rreq, "")
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	status, err := globalSiteReplicationSys.RemovePeerCluster(ctx, objectAPI, rreq)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	body, err := json.Marshal(status)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	writeSuccessResponseJSON(w, body)
}

// SRPeerRemove - PUT /minio/admin/v3/site-replication/peer/remove
//
// used internally to tell current cluster to remove a peer
func (a adminAPIHandlers) SRPeerRemove(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SRPeerRemove")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationRemoveAction)
	if objectAPI == nil {
		return
	}

	var req madmin.SRRemoveReq
	if err := parseJSONBody(ctx, r.Body, &req, ""); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err := globalSiteReplicationSys.InternalRemoveReq(ctx, objectAPI, req); err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}
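The rename pattern in these hunks is consistent: administrator-facing endpoints keep their `SiteReplication*` names, while the handlers that MinIO clusters invoke on each other move from `SRInternal*` to `SRPeer*` and are routed under `/site-replication/peer/...`, matching the "used internally" notes in the doc comments above.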
141	cmd/admin-handlers-users-race_test.go	Normal file
@@ -0,0 +1,141 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

//go:build !race
// +build !race

// Tests in this file are not run under the `-race` flag as they are too slow
// and cause context deadline errors.

package cmd

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/minio/madmin-go"
	minio "github.com/minio/minio-go/v7"
)

func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) {
	suite.SetUpSuite(c)
	suite.TestDeleteUserRace(c)
	suite.TearDownSuite(c)
}

func TestIAMInternalIDPConcurrencyServerSuite(t *testing.T) {
	baseTestCases := []TestSuiteCommon{
		// Init and run test on FS backend with signature v4.
		{serverType: "FS", signer: signerV4},
		// Init and run test on FS backend, with tls enabled.
		{serverType: "FS", signer: signerV4, secure: true},
		// Init and run test on Erasure backend.
		{serverType: "Erasure", signer: signerV4},
		// Init and run test on ErasureSet backend.
		{serverType: "ErasureSet", signer: signerV4},
	}
	testCases := []*TestSuiteIAM{}
	for _, bt := range baseTestCases {
		testCases = append(testCases,
			newTestSuiteIAM(bt, false),
			newTestSuiteIAM(bt, true),
		)
	}
	for i, testCase := range testCases {
		etcdStr := ""
		if testCase.withEtcdBackend {
			etcdStr = " (with etcd backend)"
		}
		t.Run(
			fmt.Sprintf("Test: %d, ServerType: %s%s", i+1, testCase.serverType, etcdStr),
			func(t *testing.T) {
				runAllIAMConcurrencyTests(testCase, &check{t, testCase.serverType})
			},
		)
	}
}

func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	bucket := getRandomBucketName()
	err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
	if err != nil {
		c.Fatalf("bucket create error: %v", err)
	}

	// Create a policy
	policy := "mypolicy"
	policyBytes := []byte(fmt.Sprintf(`{
 "Version": "2012-10-17",
 "Statement": [
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject",
    "s3:ListBucket"
   ],
   "Resource": [
    "arn:aws:s3:::%s/*"
   ]
  }
 ]
}`, bucket))
	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
	if err != nil {
		c.Fatalf("policy add error: %v", err)
	}

	userCount := 50
	accessKeys := make([]string, userCount)
	secretKeys := make([]string, userCount)
	for i := 0; i < userCount; i++ {
		accessKey, secretKey := mustGenerateCredentials(c)
		err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
		if err != nil {
			c.Fatalf("Unable to set user: %v", err)
		}

		err = s.adm.SetPolicy(ctx, policy, accessKey, false)
		if err != nil {
			c.Fatalf("Unable to set policy: %v", err)
		}

		accessKeys[i] = accessKey
		secretKeys[i] = secretKey
	}

	wg := sync.WaitGroup{}
	for i := 0; i < userCount; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			uClient := s.getUserClient(c, accessKeys[i], secretKeys[i], "")
			err := s.adm.RemoveUser(ctx, accessKeys[i])
			if err != nil {
				c.Fatalf("unable to remove user: %v", err)
			}
			c.mustNotListObjects(ctx, uClient, bucket)
		}(i)
	}
	wg.Wait()
}
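Because of the `//go:build !race` constraint, this suite is skipped under `go test -race`: per the file comment, the 50 concurrent user removals are too slow under the race detector and would overrun the 30-second context the test sets up.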
@@ -58,21 +58,24 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
		return
	}

	if err := globalIAMSys.DeleteUser(accessKey); err != nil {
	if err := globalIAMSys.DeleteUser(ctx, accessKey, true); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Notify all other MinIO peers to delete user.
	for _, nerr := range globalNotificationSys.DeleteUser(accessKey) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
		Type: madmin.SRIAMItemIAMUser,
		IAMUser: &madmin.SRIAMUser{
			AccessKey:   accessKey,
			IsDeleteReq: true,
		},
	}); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}
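This hunk shows the pattern repeated across the IAM handlers below: the explicit peer-notification loops over `globalNotificationSys` are dropped (the IAM calls now take `ctx` and a notify flag and handle peer reloads themselves), and each mutation instead reports to site replication through `IAMChangeHook`. A sketch of that reporting step as a hypothetical helper, using only types visible in this diff:

```go
// Hypothetical helper (not in this changeset) showing the site-replication
// reporting step that follows each IAM change: wrap the change in a typed
// madmin.SRIAMItem and forward it via IAMChangeHook.
func notifyUserDeleted(ctx context.Context, accessKey string) error {
	return globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
		Type: madmin.SRIAMItemIAMUser,
		IAMUser: &madmin.SRIAMUser{
			AccessKey:   accessKey,
			IsDeleteReq: true, // a deletion, not an add/update
		},
	})
}
```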
// ListUsers - GET /minio/admin/v3/list-users?bucket={bucket}
// ListBucketUsers - GET /minio/admin/v3/list-users?bucket={bucket}
func (a adminAPIHandlers) ListBucketUsers(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListBucketUsers")

@@ -164,34 +167,27 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
		return
	}

	accessKey := cred.ParentUser
	if accessKey == "" {
		accessKey = cred.AccessKey
	checkDenyOnly := false
	if name == cred.AccessKey {
		// Check that there is no explicit deny - otherwise it's allowed
		// to view one's own info.
		checkDenyOnly = true
	}

	// For temporary credentials, always use the credentials themselves
	// to check policy, without implicit permissions.
	if cred.IsTemp() && cred.ParentUser == globalActiveCred.AccessKey {
		accessKey = cred.AccessKey
	if !globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName:     cred.AccessKey,
		Groups:          cred.Groups,
		Action:          iampolicy.GetUserAdminAction,
		ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
		IsOwner:         owner,
		Claims:          claims,
		DenyOnly:        checkDenyOnly,
	}) {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
		return
	}

	implicitPerm := name == accessKey
	if !implicitPerm {
		if !globalIAMSys.IsAllowed(iampolicy.Args{
			AccountName:     accessKey,
			Groups:          cred.Groups,
			Action:          iampolicy.GetUserAdminAction,
			ConditionValues: getConditionValues(r, "", accessKey, claims),
			IsOwner:         owner,
			Claims:          claims,
		}) {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
			return
		}
	}

	userInfo, err := globalIAMSys.GetUserInfo(name)
	userInfo, err := globalIAMSys.GetUserInfo(ctx, name)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
@@ -231,22 +227,23 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
	}

	if updReq.IsRemove {
		err = globalIAMSys.RemoveUsersFromGroup(updReq.Group, updReq.Members)
		err = globalIAMSys.RemoveUsersFromGroup(ctx, updReq.Group, updReq.Members)
	} else {
		err = globalIAMSys.AddUsersToGroup(updReq.Group, updReq.Members)
		err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members)
	}

	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Notify all other MinIO peers to load group.
	for _, nerr := range globalNotificationSys.LoadGroup(updReq.Group) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
		Type: madmin.SRIAMItemGroupInfo,
		GroupInfo: &madmin.SRGroupInfo{
			UpdateReq: updReq,
		},
	}); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

@@ -290,7 +287,7 @@ func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
		return
	}

	groups, err := globalIAMSys.ListGroups()
	groups, err := globalIAMSys.ListGroups(ctx)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
@@ -321,11 +318,12 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
	status := vars["status"]

	var err error
	if status == statusEnabled {
		err = globalIAMSys.SetGroupStatus(group, true)
	} else if status == statusDisabled {
		err = globalIAMSys.SetGroupStatus(group, false)
	} else {
	switch status {
	case statusEnabled:
		err = globalIAMSys.SetGroupStatus(ctx, group, true)
	case statusDisabled:
		err = globalIAMSys.SetGroupStatus(ctx, group, false)
	default:
		err = errInvalidArgument
	}
	if err != nil {
@@ -333,12 +331,18 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
		return
	}

	// Notify all other MinIO peers to reload user.
	for _, nerr := range globalNotificationSys.LoadGroup(group) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
		Type: madmin.SRIAMItemGroupInfo,
		GroupInfo: &madmin.SRGroupInfo{
			UpdateReq: madmin.GroupAddRemove{
				Group:    group,
				Status:   madmin.GroupStatus(status),
				IsRemove: false,
			},
		},
	}); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

@@ -357,23 +361,29 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
	accessKey := vars["accessKey"]
	status := vars["status"]

	// This API is not allowed to lookup accessKey user status
	// This API is not allowed to lookup master access key user status
	if accessKey == globalActiveCred.AccessKey {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}

	if err := globalIAMSys.SetUserStatus(accessKey, madmin.AccountStatus(status)); err != nil {
	if err := globalIAMSys.SetUserStatus(ctx, accessKey, madmin.AccountStatus(status)); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Notify all other MinIO peers to reload user.
	for _, nerr := range globalNotificationSys.LoadUser(accessKey, false) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
		Type: madmin.SRIAMItemIAMUser,
		IAMUser: &madmin.SRIAMUser{
			AccessKey:   accessKey,
			IsDeleteReq: false,
			UserReq: &madmin.AddOrUpdateUserReq{
				Status: madmin.AccountStatus(status),
			},
		},
	}); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}
@@ -405,6 +415,14 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
		return
	}

	userCred, exists := globalIAMSys.GetUser(ctx, accessKey)
	if exists && (userCred.IsTemp() || userCred.IsServiceAccount()) {
		// Updating STS credential is not allowed, and this API does not
		// support updating service accounts.
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
		return
	}

	if (cred.IsTemp() || cred.IsServiceAccount()) && cred.ParentUser == accessKey {
		// Incoming access key matches parent user then we should
		// reject password change requests.
@@ -412,39 +430,21 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
		return
	}

	implicitPerm := accessKey == cred.AccessKey
	if !implicitPerm {
		parentUser := cred.ParentUser
		if parentUser == "" {
			parentUser = cred.AccessKey
		}
		// For temporary credentials, always use the credentials themselves
		// to check policy, without implicit permissions.
		if cred.IsTemp() && cred.ParentUser == globalActiveCred.AccessKey {
			parentUser = cred.AccessKey
		}
		if !globalIAMSys.IsAllowed(iampolicy.Args{
			AccountName:     parentUser,
			Groups:          cred.Groups,
			Action:          iampolicy.CreateUserAdminAction,
			ConditionValues: getConditionValues(r, "", parentUser, claims),
			IsOwner:         owner,
			Claims:          claims,
		}) {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
			return
		}
	checkDenyOnly := false
	if accessKey == cred.AccessKey {
		// Check that there is no explicit deny - otherwise it's allowed
		// to change one's own password.
		checkDenyOnly = true
	}

	if implicitPerm && !globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName:     accessKey,
	if !globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName:     cred.AccessKey,
		Groups:          cred.Groups,
		Action:          iampolicy.CreateUserAdminAction,
		ConditionValues: getConditionValues(r, "", accessKey, claims),
		ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
		IsOwner:         owner,
		Claims:          claims,
		DenyOnly:        true, // check if changing password is explicitly denied.
		DenyOnly:        checkDenyOnly,
	}) {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
		return
@@ -464,24 +464,28 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
		return
	}

	var uinfo madmin.UserInfo
	if err = json.Unmarshal(configBytes, &uinfo); err != nil {
	var ureq madmin.AddOrUpdateUserReq
	if err = json.Unmarshal(configBytes, &ureq); err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
		return
	}

	if err = globalIAMSys.CreateUser(accessKey, uinfo); err != nil {
	if err = globalIAMSys.CreateUser(ctx, accessKey, ureq); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Notify all other MinIO peers to reload user
	for _, nerr := range globalNotificationSys.LoadUser(accessKey, false) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
		Type: madmin.SRIAMItemIAMUser,
		IAMUser: &madmin.SRIAMUser{
			AccessKey:   accessKey,
			IsDeleteReq: false,
			UserReq:     &ureq,
		},
	}); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}
@@ -556,6 +560,23 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
	// that targetUser is a real user (i.e. not derived
	// credentials).
	if isSvcAccForRequestor {
		// Check if adding service account is explicitly denied.
		//
		// This allows turning off service accounts for the request sender;
		// if there is no deny statement this call is implicitly enabled.
		if !globalIAMSys.IsAllowed(iampolicy.Args{
			AccountName:     requestorUser,
			Groups:          requestorGroups,
			Action:          iampolicy.CreateServiceAccountAdminAction,
			ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
			IsOwner:         owner,
			Claims:          claims,
			DenyOnly:        true,
		}) {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
			return
		}

		if requestorIsDerivedCredential {
			if requestorParentUser == "" {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx,
@@ -579,10 +600,12 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
	// user <> to the request sender
	if !globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName:     requestorUser,
		Groups:          requestorGroups,
		Action:          iampolicy.CreateServiceAccountAdminAction,
		ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
		IsOwner:         owner,
		Claims:          claims,
		DenyOnly:        true,
	}) {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
		return
@@ -622,22 +645,9 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
		return
	}

	// Notify all other MinIO peers to reload the service account
	for _, nerr := range globalNotificationSys.LoadServiceAccount(newCred.AccessKey) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	}

	// Call hook for cluster-replication.
	//
	// FIXME: This won't work in an OpenID situation as the parent credential
	// may not be present on peer clusters to provide inherited policies.
	// Also, we should not be replicating root user's service account - as
	// they are not authenticated by a common external IDP, so we skip when
	// opts.ldapUser == "".
	if _, isLDAPAccount := opts.claims[ldapUserN]; isLDAPAccount {
	// Call hook for cluster-replication if the service account is not for a
	// root user.
	if newCred.ParentUser != globalActiveCred.AccessKey {
		err = globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
			Type: madmin.SRIAMItemSvcAcc,
			SvcAccChange: &madmin.SRSvcAccChange{
@@ -658,7 +668,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
		}
	}

	var createResp = madmin.AddServiceAccountResp{
	createResp := madmin.AddServiceAccountResp{
		Credentials: madmin.Credentials{
			AccessKey: newCred.AccessKey,
			SecretKey: newCred.SecretKey,
@@ -761,22 +771,8 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
		return
	}

	// Notify all other MinIO peers to reload the service account
	for _, nerr := range globalNotificationSys.LoadServiceAccount(accessKey) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	}

	// Call site replication hook. Only LDAP accounts are supported for
	// replication operations.
	svcAccClaims, err := globalIAMSys.GetClaimsForSvcAcc(ctx, accessKey)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	if _, isLDAPAccount := svcAccClaims[ldapUserN]; isLDAPAccount {
	// Call site replication hook - non-root user accounts are replicated.
	if svcAccount.ParentUser != globalActiveCred.AccessKey {
		err = globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
			Type: madmin.SRIAMItemSvcAcc,
			SvcAccChange: &madmin.SRSvcAccChange{
@@ -793,6 +789,7 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
		return
	}
	}

	writeSuccessNoContent(w)
}
@@ -847,18 +844,15 @@ func (a adminAPIHandlers) InfoServiceAccount(w http.ResponseWriter, r *http.Requ

	var svcAccountPolicy iampolicy.Policy

	impliedPolicy := policy == nil

	// If policy is empty, check for policy of the parent user
	if !impliedPolicy {
		svcAccountPolicy = svcAccountPolicy.Merge(*policy)
	if policy != nil {
		svcAccountPolicy = *policy
	} else {
		policiesNames, err := globalIAMSys.PolicyDBGet(svcAccount.ParentUser, false)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		svcAccountPolicy = svcAccountPolicy.Merge(globalIAMSys.GetCombinedPolicy(policiesNames...))
		svcAccountPolicy = globalIAMSys.GetCombinedPolicy(policiesNames...)
	}

	policyJSON, err := json.MarshalIndent(svcAccountPolicy, "", " ")
@@ -867,10 +861,10 @@ func (a adminAPIHandlers) InfoServiceAccount(w http.ResponseWriter, r *http.Requ
		return
	}

	var infoResp = madmin.InfoServiceAccountResp{
	infoResp := madmin.InfoServiceAccountResp{
		ParentUser:    svcAccount.ParentUser,
		AccountStatus: svcAccount.Status,
		ImpliedPolicy: impliedPolicy,
		ImpliedPolicy: policy == nil,
		Policy:        string(policyJSON),
	}

@@ -910,8 +904,10 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req

	var targetAccount string

	// If listing is requested for a specific user (who is not the request
	// sender), check that the user has permissions.
	user := r.Form.Get("user")
	if user != "" {
	if user != "" && user != cred.AccessKey {
		if !globalIAMSys.IsAllowed(iampolicy.Args{
			AccountName: cred.AccessKey,
			Action:      iampolicy.ListServiceAccountsAdminAction,
@@ -942,7 +938,7 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
		serviceAccountsNames = append(serviceAccountsNames, svc.AccessKey)
	}

	var listResp = madmin.ListServiceAccountsResp{
	listResp := madmin.ListServiceAccountsResp{
		Accounts: serviceAccountsNames,
	}
@@ -1014,27 +1010,14 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
		}
	}

	err = globalIAMSys.DeleteServiceAccount(ctx, serviceAccount)
	err = globalIAMSys.DeleteServiceAccount(ctx, serviceAccount, true)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	for _, nerr := range globalNotificationSys.DeleteServiceAccount(serviceAccount) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	}

	// Call site replication hook. Only LDAP accounts are supported for
	// replication operations.
	svcAccClaims, err := globalIAMSys.GetClaimsForSvcAcc(ctx, serviceAccount)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	if _, isLDAPAccount := svcAccClaims[ldapUserN]; isLDAPAccount {
	// Call site replication hook - non-root user accounts are replicated.
	if svcAccount.ParentUser != globalActiveCred.AccessKey {
		err = globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
			Type: madmin.SRIAMItemSvcAcc,
			SvcAccChange: &madmin.SRSvcAccChange{
@@ -1114,11 +1097,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
	var err error
	if !globalIsGateway {
		// Load the latest calculated data usage
		dataUsageInfo, err = loadDataUsageFromBackend(ctx, objectAPI)
		if err != nil {
			// log the error, continue with the accounting response
			logger.LogIf(ctx, err)
		}
		dataUsageInfo, _ = loadDataUsageFromBackend(ctx, objectAPI)
	}

	// If etcd, dns federation configured list buckets from etcd.
@@ -1149,19 +1128,11 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
	}

	accountName := cred.AccessKey
	var policies []string
	switch globalIAMSys.usersSysType {
	case MinIOUsersSysType:
		policies, err = globalIAMSys.PolicyDBGet(accountName, false)
	case LDAPUsersSysType:
		parentUser := accountName
		if cred.ParentUser != "" {
			parentUser = cred.ParentUser
		}
		policies, err = globalIAMSys.PolicyDBGet(parentUser, false, cred.Groups...)
	default:
		err = errors.New("should never happen")
	if cred.IsTemp() || cred.IsServiceAccount() {
		// For derived credentials, check the parent user's permissions.
		accountName = cred.ParentUser
	}
	policies, err := globalIAMSys.PolicyDBGet(accountName, false, cred.Groups...)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -1195,15 +1166,11 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
		// Fetch the prefix usage of the current bucket
		var prefixUsage map[string]uint64
		if enablePrefixUsage {
			if pu, err := loadPrefixUsageFromBackend(ctx, objectAPI, bucket.Name); err == nil {
				prefixUsage = pu
			} else {
				logger.LogIf(ctx, err)
			}
			prefixUsage, _ = loadPrefixUsageFromBackend(ctx, objectAPI, bucket.Name)
		}

		lcfg, _ := globalBucketObjectLockSys.Get(bucket.Name)
		quota, _ := globalBucketQuotaSys.Get(bucket.Name)
		quota, _ := globalBucketQuotaSys.Get(ctx, bucket.Name)
		rcfg, _ := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket.Name)
		tcfg, _ := globalBucketMetadataSys.GetTaggingConfig(bucket.Name)
@@ -1240,6 +1207,16 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
}

// InfoCannedPolicy - GET /minio/admin/v3/info-canned-policy?name={policyName}
//
// Newer API response with policy timestamps is returned with query parameter
// `v=2` like:
//
// GET /minio/admin/v3/info-canned-policy?name={policyName}&v=2
//
// The newer API will eventually become the default (and only) one. The older
// response is to return only the policy JSON. The newer response returns
// timestamps along with the policy JSON. Both versions are supported for now,
// for a smooth transition to the new API.
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "InfoCannedPolicy")

@@ -1250,13 +1227,36 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
		return
	}

	policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
	name := mux.Vars(r)["name"]
	policies := newMappedPolicy(name).toSlice()
	if len(policies) != 1 {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errTooManyPolicies), r.URL)
		return
	}

	policyDoc, err := globalIAMSys.InfoPolicy(name)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	buf, err := json.MarshalIndent(policy, "", " ")
	// Is the new API version being requested?
	infoPolicyAPIVersion := r.Form.Get("v")
	if infoPolicyAPIVersion == "2" {
		buf, err := json.MarshalIndent(policyDoc, "", " ")
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		w.Write(buf)
		return
	} else if infoPolicyAPIVersion != "" {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errors.New("invalid version parameter 'v' supplied")), r.URL)
		return
	}

	// Return the older API response value of just the policy json.
	buf, err := json.MarshalIndent(policyDoc.Policy, "", " ")
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
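The version negotiation above is small enough to isolate. A hypothetical helper (not in this changeset) that captures it: an empty `v` selects the legacy policy-JSON-only response, `"2"` the new document-with-timestamps response, and anything else is rejected:

```go
// selectPolicyResponse is a hypothetical extraction of the "v" parameter
// handling shown in the hunk above.
func selectPolicyResponse(v string) (useV2 bool, err error) {
	switch v {
	case "2":
		return true, nil // new response: policy document with timestamps
	case "":
		return false, nil // legacy response: policy JSON only
	default:
		return false, errors.New("invalid version parameter 'v' supplied")
	}
}
```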
@@ -1276,13 +1276,13 @@ func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Requ
	}

	bucket := mux.Vars(r)["bucket"]
	policies, err := globalIAMSys.ListPolicies(bucket)
	policies, err := globalIAMSys.ListPolicies(ctx, bucket)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	var newPolicies = make(map[string]iampolicy.Policy)
	newPolicies := make(map[string]iampolicy.Policy)
	for name, p := range policies {
		_, err = json.Marshal(p)
		if err != nil {
@@ -1295,9 +1295,6 @@ func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Requ
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	w.(http.Flusher).Flush()

}

// ListCannedPolicies - GET /minio/admin/v3/list-canned-policies
@@ -1311,13 +1308,13 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
		return
	}

	policies, err := globalIAMSys.ListPolicies("")
	policies, err := globalIAMSys.ListPolicies(ctx, "")
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	var newPolicies = make(map[string]iampolicy.Policy)
	newPolicies := make(map[string]iampolicy.Policy)
	for name, p := range policies {
		_, err = json.Marshal(p)
		if err != nil {
@@ -1330,8 +1327,6 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	w.(http.Flusher).Flush()
}

// RemoveCannedPolicy - DELETE /minio/admin/v3/remove-canned-policy?name=<policy_name>
@@ -1348,19 +1343,11 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
	vars := mux.Vars(r)
	policyName := vars["name"]

	if err := globalIAMSys.DeletePolicy(policyName); err != nil {
	if err := globalIAMSys.DeletePolicy(ctx, policyName, true); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Notify all other MinIO peers to delete policy
	for _, nerr := range globalNotificationSys.DeletePolicy(policyName) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	}

	// Call cluster-replication policy creation hook to replicate policy deletion to
	// other minio clusters.
	if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
@@ -1416,19 +1403,11 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
		return
	}

	if err = globalIAMSys.SetPolicy(policyName, *iamPolicy); err != nil {
	if err = globalIAMSys.SetPolicy(ctx, policyName, *iamPolicy); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Notify all other MinIO peers to reload policy
	for _, nerr := range globalNotificationSys.LoadPolicy(policyName) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	}

	// Call cluster-replication policy creation hook to replicate policy to
	// other minio clusters.
	if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
@@ -1469,19 +1448,11 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
		}
	}

	if err := globalIAMSys.PolicyDBSet(entityName, policyName, isGroup); err != nil {
	if err := globalIAMSys.PolicyDBSet(ctx, entityName, policyName, isGroup); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Notify all other MinIO peers to reload policy
	for _, nerr := range globalNotificationSys.LoadPolicyMapping(entityName, isGroup) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	}

	if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
		Type: madmin.SRIAMItemPolicyMapping,
		PolicyMapping: &madmin.SRPolicyMapping{
1176	cmd/admin-handlers-users_test.go	Normal file
File diff suppressed because it is too large
@@ -23,6 +23,7 @@ import (
	"crypto/subtle"
	"crypto/tls"
	"encoding/json"
	"errors"
	"fmt"
	"hash/crc32"
	"io"
@@ -36,6 +37,7 @@ import (
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/dustin/go-humanize"
@@ -190,7 +192,11 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req

// ServiceHandler - POST /minio/admin/v3/service?action={action}
// ----------
// restarts/stops minio server gracefully. In a distributed setup,
// Supports following actions:
// - restart (restarts all the MinIO instances in a setup)
// - stop (stops all the MinIO instances in a setup)
// - freeze (freezes all incoming S3 API calls)
// - unfreeze (unfreezes previously frozen S3 API calls)
func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "Service")

@@ -205,6 +211,10 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request)
		serviceSig = serviceRestart
	case madmin.ServiceActionStop:
		serviceSig = serviceStop
	case madmin.ServiceActionFreeze:
		serviceSig = serviceFreeze
	case madmin.ServiceActionUnfreeze:
		serviceSig = serviceUnFreeze
	default:
		logger.LogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.Application)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
@@ -217,6 +227,8 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request)
		objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceRestartAdminAction)
	case serviceStop:
		objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceStopAdminAction)
	case serviceFreeze, serviceUnFreeze:
		objectAPI, _ = validateAdminReq(ctx, w, r, iampolicy.ServiceFreezeAdminAction)
	}
	if objectAPI == nil {
		return
@@ -233,7 +245,14 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request)
	// Reply to the client before restarting, stopping MinIO server.
	writeSuccessResponseHeadersOnly(w)

	globalServiceSignalCh <- serviceSig
	switch serviceSig {
	case serviceFreeze:
		freezeServices()
	case serviceUnFreeze:
		unfreezeServices()
	case serviceRestart, serviceStop:
		globalServiceSignalCh <- serviceSig
	}
}
|
||||
|
||||
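For context, a minimal sketch of how a client could exercise the new freeze action documented above. The endpoint and action names come from the handler comment; the host is a placeholder assumption, and a real request must carry signed admin credentials (for example via the madmin-go client), so the unsigned call below would be rejected:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// action is one of restart, stop, freeze, unfreeze per ServiceHandler above.
	// Placeholder host; unsigned requests will fail authorization.
	resp, err := http.Post("http://localhost:9000/minio/admin/v3/service?action=freeze", "", nil)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}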
// ServerProperties holds some server information such as, version, region
@@ -266,6 +285,7 @@ type ServerHTTPAPIStats struct {
// including their average execution time.
type ServerHTTPStats struct {
S3RequestsInQueue int32 `json:"s3RequestsInQueue"`
S3RequestsIncoming uint64 `json:"s3RequestsIncoming"`
CurrentS3Requests ServerHTTPAPIStats `json:"currentS3Requests"`
TotalS3Requests ServerHTTPAPIStats `json:"totalS3Requests"`
TotalS3Errors ServerHTTPAPIStats `json:"totalS3Errors"`
@@ -316,7 +336,6 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
// Reply with storage information (across nodes in a
// distributed setup) as json.
writeSuccessResponseJSON(w, jsonBytes)

}

// DataUsageInfoHandler - GET /minio/admin/v3/datausage
@@ -756,13 +775,17 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
}
// Send whitespace and keep connection open
w.Write([]byte(" "))
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
case hr := <-respCh:
switch hr.apiErr {
case noError:
if started {
w.Write(hr.respBytes)
if _, err := w.Write(hr.respBytes); err != nil {
return
}
w.(http.Flusher).Flush()
} else {
writeSuccessResponseJSON(w, hr.respBytes)
@@ -787,7 +810,9 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set(xhttp.ContentType, string(mimeJSON))
w.WriteHeader(hr.apiErr.HTTPStatusCode)
}
w.Write(errorRespJSON)
if _, err := w.Write(errorRespJSON); err != nil {
return
}
w.(http.Flusher).Flush()
}
break forLoop
@@ -911,12 +936,65 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
}
}

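The hunk above replaces bare w.Write calls with checked ones; a small self-contained sketch of the same keep-alive idiom, where a single space is written per tick and a write error signals client disconnect (package name and ticker interval are illustrative):

package keepalive

import (
	"net/http"
	"time"
)

func keepAlive(w http.ResponseWriter, done <-chan struct{}) {
	t := time.NewTicker(500 * time.Millisecond)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			// Whitespace is ignored by JSON decoders but keeps proxies from
			// timing out the connection; a failed write means the client left.
			if _, err := w.Write([]byte(" ")); err != nil {
				return
			}
			if f, ok := w.(http.Flusher); ok {
				f.Flush()
			}
		case <-done:
			return
		}
	}
}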
func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SpeedtestHandler")
// NetperfHandler - perform mesh style network throughput test
func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "NetperfHandler")

defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealAdminAction)
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
if objectAPI == nil {
return
}

if !globalIsDistErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

nsLock := objectAPI.NewNSLock(minioMetaBucket, "netperf")
lkctx, err := nsLock.GetLock(ctx, globalOperationTimeout)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(toAPIErrorCode(ctx, err)), r.URL)
return
}
defer nsLock.Unlock(lkctx.Cancel)

durationStr := r.Form.Get(peerRESTDuration)
duration, err := time.ParseDuration(durationStr)
if err != nil {
duration = globalNetPerfMinDuration
}

if duration < globalNetPerfMinDuration {
// We need sample size of minimum 10 secs.
duration = globalNetPerfMinDuration
}

duration = duration.Round(time.Second)

results := globalNotificationSys.Netperf(ctx, duration)
enc := json.NewEncoder(w)
if err := enc.Encode(madmin.NetperfResult{NodeResults: results}); err != nil {
return
}
}

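A compact restatement of the duration handling above as a standalone helper; the 10-second minimum mirrors the globalNetPerfMinDuration comment, and the package and function names are illustrative:

package netperf

import "time"

// netperfDuration clamps a client-supplied duration to the minimum sample
// window and rounds it to whole seconds, as NetperfHandler does above.
func netperfDuration(form string, min time.Duration) time.Duration {
	d, err := time.ParseDuration(form)
	if err != nil || d < min {
		d = min
	}
	return d.Round(time.Second)
}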
// SpeedtestHandler - Deprecated. See ObjectSpeedtestHandler
func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Request) {
a.ObjectSpeedtestHandler(w, r)
}

// ObjectSpeedtestHandler - reports maximum speed of a cluster by performing PUT and
// GET operations on the server, supports auto tuning by default by automatically
// increasing concurrency and stopping when we have reached the limits on the
// system.
func (a adminAPIHandlers) ObjectSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ObjectSpeedtestHandler")

defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
if objectAPI == nil {
return
}
@@ -929,12 +1007,8 @@ func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Reques
sizeStr := r.Form.Get(peerRESTSize)
durationStr := r.Form.Get(peerRESTDuration)
concurrentStr := r.Form.Get(peerRESTConcurrent)
autotuneStr := r.Form.Get("autotune")

var autotune bool
if autotuneStr != "" {
autotune = true
}
autotune := r.Form.Get("autotune") == "true"
storageClass := r.Form.Get("storage-class")

size, err := strconv.Atoi(sizeStr)
if err != nil {
@@ -946,59 +1020,170 @@ func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Reques
concurrent = 32
}

if runtime.GOMAXPROCS(0) < concurrent {
concurrent = runtime.GOMAXPROCS(0)
}

duration, err := time.ParseDuration(durationStr)
if err != nil {
duration = time.Second * 10
}

throughputSize := size
iopsSize := size
// ignores any errors here.
storageInfo, _ := objectAPI.StorageInfo(ctx)
capacityNeeded := uint64(concurrent * size)
capacity := uint64(GetTotalUsableCapacityFree(storageInfo.Disks, storageInfo))

if autotune {
iopsSize = 4 * humanize.KiByte
if capacity < capacityNeeded {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, AdminError{
Code: "XMinioSpeedtestInsufficientCapacity",
Message: fmt.Sprintf("not enough usable space available to perform speedtest - expected %s, got %s",
humanize.IBytes(capacityNeeded), humanize.IBytes(capacity)),
StatusCode: http.StatusInsufficientStorage,
}), r.URL)
return
}

// Verify if we can employ autotune without running out of capacity,
// if we do run out of capacity, make sure to turn-off autotuning
// in such situations.
newConcurrent := concurrent + (concurrent+1)/2
autoTunedCapacityNeeded := uint64(newConcurrent * size)
if autotune && capacity < autoTunedCapacityNeeded {
// Turn-off auto-tuning if next possible concurrency would reach beyond disk capacity.
autotune = false
}

deleteBucket := func() {
objectAPI.DeleteBucket(context.Background(), pathJoin(minioMetaBucket, "speedtest"), DeleteBucketOptions{
Force: true,
NoRecreate: true,
})
}
defer deleteBucket()

// Freeze all incoming S3 API calls before running speedtest.
globalNotificationSys.ServiceFreeze(ctx, true)

// unfreeze all incoming S3 API calls after speedtest.
defer globalNotificationSys.ServiceFreeze(ctx, false)

keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()

enc := json.NewEncoder(w)
ch := speedTest(ctx, speedTestOpts{size, concurrent, duration, autotune, storageClass})
for {
select {
case <-ctx.Done():
return
case <-keepAliveTicker.C:
// Write a blank entry to prevent client from disconnecting
if err := enc.Encode(madmin.SpeedTestResult{}); err != nil {
return
}
w.(http.Flusher).Flush()
case result, ok := <-ch:
if !ok {
return
}
if err := enc.Encode(result); err != nil {
return
}
w.(http.Flusher).Flush()
}
}
}

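The autotune guard above can be read as a small pure function; a sketch with a worked example (package and function names are illustrative):

package speedtest

// allowAutotune reports whether the next autotune step fits in free capacity.
// The step grows concurrency by half, e.g. 32 -> 48; with 64 MiB objects that
// next step needs 48 * 64 MiB = 3 GiB of usable free space.
func allowAutotune(concurrent, size int, capacity uint64) bool {
	newConcurrent := concurrent + (concurrent+1)/2
	return capacity >= uint64(newConcurrent*size)
}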
// NetSpeedtestHandler - reports maximum network throughput
func (a adminAPIHandlers) NetSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "NetSpeedtestHandler")

writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
}

// DriveSpeedtestHandler - reports throughput of drives available in the cluster
func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DriveSpeedtestHandler")

defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealthInfoAdminAction)
if objectAPI == nil {
return
}

if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

// Freeze all incoming S3 API calls before running speedtest.
globalNotificationSys.ServiceFreeze(ctx, true)

// unfreeze all incoming S3 API calls after speedtest.
defer globalNotificationSys.ServiceFreeze(ctx, false)

serial := r.Form.Get("serial") == "true"
blockSizeStr := r.Form.Get("blocksize")
fileSizeStr := r.Form.Get("filesize")

blockSize, err := strconv.ParseUint(blockSizeStr, 10, 64)
if err != nil {
blockSize = 4 * humanize.MiByte // default value
}

fileSize, err := strconv.ParseUint(fileSizeStr, 10, 64)
if err != nil {
fileSize = 1 * humanize.GiByte // default value
}

opts := madmin.DriveSpeedTestOpts{
Serial: serial,
BlockSize: blockSize,
FileSize: fileSize,
}

keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()

endBlankRepliesCh := make(chan error)
enc := json.NewEncoder(w)
ch := globalNotificationSys.DriveSpeedTest(ctx, opts)

var wg sync.WaitGroup
wg.Add(1)

// local driveSpeedTest
go func() {
for {
select {
case <-ctx.Done():
endBlankRepliesCh <- nil
return
case <-keepAliveTicker.C:
// Write a blank entry to prevent client from disconnecting
if err := json.NewEncoder(w).Encode(madmin.SpeedTestResult{}); err != nil {
endBlankRepliesCh <- err
return
}
w.(http.Flusher).Flush()
case endBlankRepliesCh <- nil:
return
}
defer wg.Done()
enc.Encode(driveSpeedTest(ctx, opts))
if wf, ok := w.(http.Flusher); ok {
wf.Flush()
}
}()

result, err := speedTest(ctx, throughputSize, iopsSize, concurrent, duration, autotune)
if <-endBlankRepliesCh != nil {
return
for {
select {
case <-ctx.Done():
goto endloop
case <-keepAliveTicker.C:
// Write a blank entry to prevent client from disconnecting
if err := enc.Encode(madmin.DriveSpeedTestResult{}); err != nil {
goto endloop
}
w.(http.Flusher).Flush()
case result, ok := <-ch:
if !ok {
goto endloop
}
if err := enc.Encode(result); err != nil {
goto endloop
}
w.(http.Flusher).Flush()
}
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

if err := json.NewEncoder(w).Encode(result); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

objectAPI.DeleteBucket(ctx, pathJoin(minioMetaSpeedTestBucket, minioMetaSpeedTestBucketPrefix), DeleteBucketOptions{Force: true, NoRecreate: true})

w.(http.Flusher).Flush()
endloop:
wg.Wait()
}

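The parameter fallbacks in DriveSpeedtestHandler, extracted into a standalone sketch; the helper name is illustrative and the defaults are exactly the ones shown above:

package speedtest

import (
	"strconv"

	"github.com/dustin/go-humanize"
	"github.com/minio/madmin-go"
)

// driveTestOpts mirrors the form handling above: unparseable sizes fall back
// to a 4 MiB block size and a 1 GiB file size.
func driveTestOpts(blockSizeStr, fileSizeStr string, serial bool) madmin.DriveSpeedTestOpts {
	blockSize, err := strconv.ParseUint(blockSizeStr, 10, 64)
	if err != nil {
		blockSize = 4 * humanize.MiByte
	}
	fileSize, err := strconv.ParseUint(fileSizeStr, 10, 64)
	if err != nil {
		fileSize = 1 * humanize.GiByte
	}
	return madmin.DriveSpeedTestOpts{Serial: serial, BlockSize: blockSize, FileSize: fileSize}
}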
// Admin API errors
@@ -1208,10 +1393,10 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
if err := enc.Encode(log); err != nil {
return
}
}
if len(logCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
if len(logCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
}
}
case <-keepAliveTicker.C:
if len(logCh) > 0 {
@@ -1313,7 +1498,7 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
if keyID == "" {
keyID = stat.DefaultKey
}
var response = madmin.KMSKeyStatus{
response := madmin.KMSKeyStatus{
KeyID: keyID,
}

@@ -1370,6 +1555,7 @@ func getServerInfo(ctx context.Context, r *http.Request) madmin.InfoMessage {
ldap := madmin.LDAP{}
if globalLDAPConfig.Enabled {
ldapConn, err := globalLDAPConfig.Connect()
//nolint:gocritic
if err != nil {
ldap.Status = string(madmin.ItemOffline)
} else if ldapConn == nil {
@@ -1451,7 +1637,7 @@ func getServerInfo(ctx context.Context, r *http.Request) madmin.InfoMessage {
return madmin.InfoMessage{
Mode: string(mode),
Domain: domain,
Region: globalServerRegion,
Region: globalSite.Region,
SQSARN: globalNotificationSys.GetARNList(false),
DeploymentID: globalDeploymentID,
Buckets: buckets,
@@ -1650,8 +1836,8 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
anonymizeCmdLine := func(cmdLine string) string {
if !globalIsDistErasure {
// FS mode - single server - hard code to `server1`
anonCmdLine := strings.Replace(cmdLine, globalLocalNodeName, "server1", -1)
return strings.Replace(anonCmdLine, globalMinioConsoleHost, "server1", -1)
anonCmdLine := strings.ReplaceAll(cmdLine, globalLocalNodeName, "server1")
return strings.ReplaceAll(anonCmdLine, globalMinioConsoleHost, "server1")
}

// Server start command regex groups:
@@ -1796,7 +1982,6 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
anonNetwork[anonEndpoint] = status
}
return anonNetwork

}

anonymizeDrives := func(drives []madmin.Disk) []madmin.Disk {
@@ -1847,6 +2032,10 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
},
})
}

tls := getTLSInfo()
isK8s := IsKubernetes()
isDocker := IsDocker()
healthInfo.Minio.Info = madmin.MinioInfo{
Mode: infoMessage.Mode,
Domain: infoMessage.Domain,
@@ -1859,6 +2048,9 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
Services: infoMessage.Services,
Backend: infoMessage.Backend,
Servers: servers,
TLS: &tls,
IsKubernetes: &isK8s,
IsDocker: &isDocker,
}
partialWrite(healthInfo)
}
@@ -1873,19 +2065,41 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
if !ok {
return
}
logger.LogIf(ctx, enc.Encode(oinfo))
w.(http.Flusher).Flush()
if err := enc.Encode(oinfo); err != nil {
return
}
if len(healthInfoCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
}
case <-ticker.C:
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
case <-deadlinedCtx.Done():
w.(http.Flusher).Flush()
return
}
}
}

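Several hunks above adopt the same streaming idiom: encode each item, but flush only once the channel backlog is drained. As a standalone sketch (package, function and element types are illustrative):

package stream

import (
	"encoding/json"
	"net/http"
)

// streamJSON writes each item as JSON and flushes only at quiet points,
// avoiding a flush per entry while still keeping the client current.
func streamJSON(w http.ResponseWriter, ch chan interface{}) {
	enc := json.NewEncoder(w)
	for item := range ch {
		if err := enc.Encode(item); err != nil {
			return // client gone
		}
		if len(ch) == 0 { // nothing queued: safe point to flush
			if f, ok := w.(http.Flusher); ok {
				f.Flush()
			}
		}
	}
}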
func getTLSInfo() madmin.TLSInfo {
tlsInfo := madmin.TLSInfo{
TLSEnabled: globalIsTLS,
Certs: []madmin.TLSCert{},
}

if globalIsTLS {
for _, c := range globalPublicCerts {
tlsInfo.Certs = append(tlsInfo.Certs, madmin.TLSCert{
PubKeyAlgo: c.PublicKeyAlgorithm.String(),
SignatureAlgo: c.SignatureAlgorithm.String(),
NotBefore: c.NotBefore,
NotAfter: c.NotAfter,
})
}
}
return tlsInfo
}

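getTLSInfo reads these fields straight off crypto/x509 certificates; the same four fields from a PEM file in a runnable sketch (the path is a placeholder assumption):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	pemBytes, err := os.ReadFile("/etc/minio/certs/public.crt") // placeholder path
	if err != nil {
		fmt.Println(err)
		return
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		fmt.Println("no PEM block found")
		return
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Println(err)
		return
	}
	// The same fields getTLSInfo reports per certificate.
	fmt.Println(cert.PublicKeyAlgorithm.String(), cert.SignatureAlgorithm.String(), cert.NotBefore, cert.NotAfter)
}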
// BandwidthMonitorHandler - GET /minio/admin/v3/bandwidth
@@ -1922,17 +2136,21 @@ func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http
}
}
}()

enc := json.NewEncoder(w)
for {
select {
case report, ok := <-reportCh:
if !ok {
return
}
if err := json.NewEncoder(w).Encode(report); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
if err := enc.Encode(report); err != nil {
return
}
w.(http.Flusher).Flush()
if len(reportCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
}
case <-keepAliveTicker.C:
if _, err := w.Write([]byte(" ")); err != nil {
return
@@ -1988,7 +2206,6 @@ func assignPoolNumbers(servers []madmin.ServerProperties) {
}

func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {

lambdaMap := make(map[string][]madmin.TargetIDStatus)

for _, tgt := range globalConfigTargetList.Targets() {
@@ -2075,7 +2292,7 @@ func fetchKMSStatus() madmin.KMS {
func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
var loggerInfo []madmin.Logger
var auditloggerInfo []madmin.Audit
for _, target := range logger.Targets {
for _, target := range logger.SystemTargets() {
if target.Endpoint() != "" {
tgt := target.String()
err := checkConnection(target.Endpoint(), 15*time.Second)
@@ -2091,7 +2308,7 @@ func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
}
}

for _, target := range logger.AuditTargets {
for _, target := range logger.AuditTargets() {
if target.Endpoint() != "" {
tgt := target.String()
err := checkConnection(target.Endpoint(), 15*time.Second)
@@ -2144,7 +2361,7 @@ func checkConnection(endpointStr string, timeout time.Duration) error {

// getRawDataer provides an interface for getting raw FS files.
type getRawDataer interface {
GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, size int64, modtime time.Time, isDir bool) error) error
GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, info StatInfo) error) error
}

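The interface change above folds size/modtime/isDir into a single StatInfo value; a fragment (not a standalone program) showing how a caller adapts, where o is assumed to satisfy getRawDataer and the printed fields are the ones the diff touches:

// Fragment: o is assumed to satisfy getRawDataer; ctx, volume, file in scope.
err := o.GetRawData(ctx, volume, file, func(r io.Reader, host, disk, filename string, si StatInfo) error {
	// size, modtime and isDir now arrive as si.Size, si.ModTime and si.Dir.
	fmt.Printf("%s/%s/%s: %d bytes, dir=%v, mode=%o\n", host, disk, filename, si.Size, si.Dir, si.Mode)
	_, err := io.Copy(io.Discard, r)
	return err
})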
// InspectDataHandler - GET /minio/admin/v3/inspect-data
@@ -2221,19 +2438,23 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
zipWriter := zip.NewWriter(encw)
defer zipWriter.Close()

err = o.GetRawData(ctx, volume, file, func(r io.Reader, host, disk, filename string, size int64, modtime time.Time, isDir bool) error {
err = o.GetRawData(ctx, volume, file, func(r io.Reader, host, disk, filename string, si StatInfo) error {
// Prefix host+disk
filename = path.Join(host, disk, filename)
if isDir {
if si.Dir {
filename += "/"
size = 0
si.Size = 0
}
if si.Mode == 0 {
// Not, set it to default.
si.Mode = 0o600
}
header, zerr := zip.FileInfoHeader(dummyFileInfo{
name: filename,
size: size,
mode: 0600,
modTime: modtime,
isDir: isDir,
size: si.Size,
mode: os.FileMode(si.Mode),
modTime: si.ModTime,
isDir: si.Dir,
sys: nil,
})
if zerr != nil {
@@ -2251,7 +2472,9 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
}
return nil
})
logger.LogIf(ctx, err)
if !errors.Is(err, errFileNotFound) {
logger.LogIf(ctx, err)
}
}

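zip.FileInfoHeader above is handed a synthetic os.FileInfo; the same archive/zip call against a real file, runnable on its own (the path is a placeholder):

package main

import (
	"archive/zip"
	"fmt"
	"os"
)

func main() {
	fi, err := os.Stat("go.mod") // placeholder path
	if err != nil {
		fmt.Println(err)
		return
	}
	hdr, err := zip.FileInfoHeader(fi)
	if err != nil {
		fmt.Println(err)
		return
	}
	hdr.Name = "host/disk/" + hdr.Name // mirrors the host+disk prefixing above
	fmt.Println(hdr.Name, hdr.UncompressedSize64, hdr.Modified)
}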
func createHostAnonymizerForFSMode() map[string]string {

@@ -28,6 +28,7 @@ import (
"net/url"
"sync"
"testing"
"time"

"github.com/gorilla/mux"
"github.com/minio/madmin-go"
@@ -40,11 +41,13 @@ type adminErasureTestBed struct {
erasureDirs []string
objLayer ObjectLayer
router *mux.Router
done context.CancelFunc
}

// prepareAdminErasureTestBed - helper function that setups a single-node
// Erasure backend for admin-handler tests.
func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, error) {
ctx, cancel := context.WithCancel(ctx)

// reset global variables to start afresh.
resetTestGlobals()
@@ -56,11 +59,13 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro
// Initializing objectLayer for HealFormatHandler.
objLayer, erasureDirs, xlErr := initTestErasureObjLayer(ctx)
if xlErr != nil {
cancel()
return nil, xlErr
}

// Initialize minio server config.
if err := newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
cancel()
return nil, err
}

@@ -69,11 +74,11 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro

globalEndpoints = mustGetPoolEndpoints(erasureDirs...)

newAllSubsystems()
initAllSubsystems()

initAllSubsystems(ctx, objLayer)
initConfigSubsystem(ctx, objLayer)

globalIAMSys.InitStore(objLayer)
globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)

// Setup admin mgmt REST API handlers.
adminRouter := mux.NewRouter()
@@ -83,12 +88,14 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro
erasureDirs: erasureDirs,
objLayer: objLayer,
router: adminRouter,
done: cancel,
}, nil
}

// TearDown - method that resets the test bed for subsequent unit
// tests to start afresh.
func (atb *adminErasureTestBed) TearDown() {
atb.done()
removeRoots(atb.erasureDirs)
resetTestGlobals()
}
@@ -229,8 +236,8 @@ func TestServiceRestartHandler(t *testing.T) {

// buildAdminRequest - helper function to build an admin API request.
func buildAdminRequest(queryVal url.Values, method, path string,
contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error) {

contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error,
) {
req, err := newTestRequest(method,
adminPathPrefix+adminAPIVersionPrefix+path+"?"+queryVal.Encode(),
contentLength, bodySeeker)
@@ -373,5 +380,4 @@ func TestExtractHealInitParams(t *testing.T) {
}
}
}

}

@@ -278,8 +278,8 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
// background routine to clean up heal results after the
// aforementioned duration.
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLayer) (
respBytes []byte, apiErr APIError, errMsg string) {

respBytes []byte, apiErr APIError, errMsg string,
) {
if h.forceStarted {
_, apiErr = ahs.stopHealSequence(pathJoin(h.bucket, h.object))
if apiErr.Code != "" {
@@ -338,8 +338,8 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay
// representation. The clientToken helps ensure there aren't
// conflicting clients fetching status.
func (ahs *allHealState) PopHealStatusJSON(hpath string,
clientToken string) ([]byte, APIErrorCode) {

clientToken string) ([]byte, APIErrorCode,
) {
// fetch heal state for given path
h, exists := ahs.getHealSequence(hpath)
if !exists {
@@ -453,8 +453,8 @@ type healSequence struct {
// NewHealSequence - creates healSettings, assumes bucket and
// objPrefix are already validated.
func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
hs madmin.HealOpts, forceStart bool) *healSequence {

hs madmin.HealOpts, forceStart bool,
) *healSequence {
reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
reqInfo.AppendTags("prefix", objPrefix)
ctx, cancel := context.WithCancel(logger.SetReqInfo(ctx, reqInfo))
@@ -491,7 +491,7 @@ func (h *healSequence) getScannedItemsCount() int64 {
defer h.mutex.RUnlock()

for _, v := range h.scannedItemsMap {
count = count + v
count += v
}
return count
}
@@ -700,8 +700,9 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
}
if source.opts != nil {
task.opts = *source.opts
} else {
task.opts.ScanMode = globalHealConfig.ScanMode()
}
task.opts.ScanMode = globalHealConfig.ScanMode()

h.mutex.Lock()
h.scannedItemsMap[healType]++
@@ -710,6 +711,9 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem

select {
case globalBackgroundHealRoutine.tasks <- task:
if serverDebugLog {
logger.Info("Task in the queue: %#v", task)
}
case <-h.ctx.Done():
return nil
}
@@ -885,6 +889,7 @@ func (h *healSequence) healObject(bucket, object, versionID string) error {
bucket: bucket,
object: object,
versionID: versionID,
opts: &h.settings,
}, madmin.HealItemObject)

// Wait and proceed if there are active requests

@@ -38,13 +38,10 @@ type adminAPIHandlers struct{}

// registerAdminRouter - Add handler functions for each service REST API routes.
func registerAdminRouter(router *mux.Router, enableConfigOps bool) {

adminAPI := adminAPIHandlers{}
// Admin router
adminRouter := router.PathPrefix(adminPathPrefix).Subrouter()

/// Service operations

adminVersions := []string{
adminAPIVersionPrefix,
}
@@ -71,17 +68,20 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(gz(httpTraceAll(adminAPI.DataUsageInfoHandler)))

if globalIsDistErasure || globalIsErasure {
/// Heal operations
// Heal operations

// Heal processing endpoint.
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))

adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(gz(httpTraceAll(adminAPI.BackgroundHealStatusHandler)))

/// Health operations
// Pool operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/pools/list").HandlerFunc(gz(httpTraceAll(adminAPI.ListPools)))
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/pools/status").HandlerFunc(gz(httpTraceAll(adminAPI.StatusPool))).Queries("pool", "{pool:.*}")

adminRouter.Methods(http.MethodPost).Path(adminVersion+"/pools/decommission").HandlerFunc(gz(httpTraceAll(adminAPI.StartDecommission))).Queries("pool", "{pool:.*}")
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/pools/cancel").HandlerFunc(gz(httpTraceAll(adminAPI.CancelDecommission))).Queries("pool", "{pool:.*}")
}

// Profiling operations
@@ -106,7 +106,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/restore-config-history-kv").HandlerFunc(gz(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler))).Queries("restoreId", "{restoreId:.*}")
}

/// Config import/export bulk operations
// Config import/export bulk operations
if enableConfigOps {
// Get config
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/config").HandlerFunc(gz(httpTraceHdrs(adminAPI.GetConfigHandler)))
@@ -191,16 +191,26 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.AddTierHandler)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.EditTierHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListTierHandler)))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.RemoveTierHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.VerifyTierHandler)))
// Tier stats
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier-stats").HandlerFunc(gz(httpTraceHdrs(adminAPI.TierStatsHandler)))

// Cluster Replication APIs
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/add").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationAdd)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/disable").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationDisable)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/remove").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationRemove)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/info").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationInfo)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/join").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRInternalJoin)))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/peer/bucket-ops").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRInternalBucketOps))).Queries("bucket", "{bucket:.*}").Queries("operation", "{operation:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/iam-item").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRInternalReplicateIAMItem)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/bucket-meta").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRInternalReplicateBucketItem)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/peer/idp-settings").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRInternalGetIDPSettings)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/metainfo").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationMetaInfo)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/status").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationStatus)))

adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/join").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerJoin)))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/peer/bucket-ops").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerBucketOps))).Queries("bucket", "{bucket:.*}").Queries("operation", "{operation:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/iam-item").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerReplicateIAMItem)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/bucket-meta").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerReplicateBucketItem)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/peer/idp-settings").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerGetIDPSettings)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/edit").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationEdit)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/edit").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerEdit)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/remove").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerRemove)))
}

if globalIsDistErasure {
@@ -212,6 +222,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
}

adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest").HandlerFunc(httpTraceHdrs(adminAPI.SpeedtestHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/object").HandlerFunc(httpTraceHdrs(adminAPI.ObjectSpeedtestHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/drive").HandlerFunc(httpTraceHdrs(adminAPI.DriveSpeedtestHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/net").HandlerFunc(httpTraceHdrs(adminAPI.NetperfHandler))

// HTTP Trace
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/trace").HandlerFunc(gz(http.HandlerFunc(adminAPI.TraceHandler)))

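The gorilla/mux registration pattern used throughout this file, reduced to a runnable sketch; the path, handler body and port are illustrative, and the Queries constraint mirrors the pools/status route above:

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// Methods + Path + HandlerFunc, optionally constrained by Queries,
	// exactly as the admin routes above are registered.
	r.Methods(http.MethodGet).
		Path("/minio/admin/v3/pools/status").
		HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			fmt.Fprintln(w, "pool:", req.URL.Query().Get("pool"))
		}).
		Queries("pool", "{pool:.*}")
	http.ListenAndServe(":8080", r)
}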
@@ -48,10 +48,15 @@ func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, startElement xml.StartElem
return e.EncodeElement(t.Time.Format(time.RFC3339), startElement)
}

// ObjectToDelete carries key name for the object to delete.
type ObjectToDelete struct {
// ObjectV object version key/versionId
type ObjectV struct {
ObjectName string `xml:"Key"`
VersionID string `xml:"VersionId"`
}

// ObjectToDelete carries key name for the object to delete.
type ObjectToDelete struct {
ObjectV
// Replication status of DeleteMarker
DeleteMarkerReplicationStatus string `xml:"DeleteMarkerReplicationStatus"`
// Status of versioned delete (of object or DeleteMarker)

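The new ObjectV type carries the Key/VersionId pair of a DeleteObjects entry; a runnable sketch decoding an illustrative payload into the same tags:

package main

import (
	"encoding/xml"
	"fmt"
)

// Same tags as the new ObjectV type above.
type ObjectV struct {
	ObjectName string `xml:"Key"`
	VersionID  string `xml:"VersionId"`
}

func main() {
	// Illustrative DeleteObjects payload exercising the Key/VersionId mapping.
	body := `<Delete><Object><Key>a.jpg</Key><VersionId>v1</VersionId></Object><Object><Key>b.jpg</Key></Object></Delete>`
	var req struct {
		Objects []ObjectV `xml:"Object"`
	}
	if err := xml.Unmarshal([]byte(body), &req); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", req.Objects) // second entry has an empty VersionID
}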
@@ -82,6 +82,7 @@ const (
ErrIncompleteBody
ErrInternalError
ErrInvalidAccessKeyID
ErrAccessKeyDisabled
ErrInvalidBucketName
ErrInvalidDigest
ErrInvalidRange
@@ -109,6 +110,7 @@ const (
ErrNoSuchBucketPolicy
ErrNoSuchBucketLifecycle
ErrNoSuchLifecycleConfiguration
ErrInvalidLifecycleWithObjectLock
ErrNoSuchBucketSSEConfig
ErrNoSuchCORSConfiguration
ErrNoSuchWebsiteConfiguration
@@ -128,6 +130,7 @@ const (
ErrReplicationSourceNotVersionedError
ErrReplicationNeedsVersioningError
ErrReplicationBucketNeedsVersioningError
ErrReplicationDenyEditError
ErrReplicationNoMatchingRuleError
ErrObjectRestoreAlreadyInProgress
ErrNoSuchKey
@@ -209,6 +212,7 @@ const (
ErrInvalidSSECustomerParameters
ErrIncompatibleEncryptionMethod
ErrKMSNotConfigured
ErrKMSKeyNotFoundException

ErrNoAccessKey
ErrInvalidToken
@@ -395,10 +399,10 @@ func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError
if err != nil {
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
}
if globalServerRegion != "" {
if globalSite.Region != "" {
switch errCode {
case ErrAuthorizationHeaderMalformed:
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalServerRegion)
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
return apiErr
}
}
@@ -512,6 +516,11 @@ var errorCodes = errorCodeMap{
Description: "The Access Key Id you provided does not exist in our records.",
HTTPStatusCode: http.StatusForbidden,
},
ErrAccessKeyDisabled: {
Code: "InvalidAccessKeyId",
Description: "Your account is disabled; please contact your administrator.",
HTTPStatusCode: http.StatusForbidden,
},
ErrInvalidBucketName: {
Code: "InvalidBucketName",
Description: "The specified bucket is not valid.",
@@ -577,6 +586,11 @@ var errorCodes = errorCodeMap{
Description: "The lifecycle configuration does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrInvalidLifecycleWithObjectLock: {
Code: "InvalidLifecycleWithObjectLock",
Description: "The lifecycle configuration containing MaxNoncurrentVersions is not supported with object locking",
HTTPStatusCode: http.StatusBadRequest,
},
ErrNoSuchBucketSSEConfig: {
Code: "ServerSideEncryptionConfigurationNotFoundError",
Description: "The server side encryption configuration was not found",
@@ -674,7 +688,7 @@ var errorCodes = errorCodeMap{
},
ErrAllAccessDisabled: {
Code: "AllAccessDisabled",
Description: "All access to this bucket has been disabled.",
Description: "All access to this resource has been disabled.",
HTTPStatusCode: http.StatusForbidden,
},
ErrMalformedPolicy: {
@@ -882,6 +896,11 @@ var errorCodes = errorCodeMap{
Description: "No matching replication rule found for this object prefix",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationDenyEditError: {
Code: "XMinioReplicationDenyEdit",
Description: "Cannot alter local replication config since this server is in a cluster replication setup",
HTTPStatusCode: http.StatusConflict,
},
ErrBucketRemoteIdenticalToSource: {
Code: "XMinioAdminRemoteIdenticalToSource",
Description: "The remote target cannot be identical to source",
@@ -973,7 +992,7 @@ var errorCodes = errorCodeMap{
HTTPStatusCode: http.StatusNotFound,
},

/// Bucket notification related errors.
// Bucket notification related errors.
ErrEventNotification: {
Code: "InvalidArgument",
Description: "A specified event is not supported for notifications.",
@@ -1109,6 +1128,11 @@ var errorCodes = errorCodeMap{
Description: "Server side encryption specified but KMS is not configured",
HTTPStatusCode: http.StatusNotImplemented,
},
ErrKMSKeyNotFoundException: {
Code: "KMS.NotFoundException",
Description: "Invalid keyId",
HTTPStatusCode: http.StatusBadRequest,
},
ErrNoAccessKey: {
Code: "AccessDenied",
Description: "No AWSAccessKey was presented",
@@ -1120,14 +1144,14 @@ var errorCodes = errorCodeMap{
HTTPStatusCode: http.StatusForbidden,
},

/// S3 extensions.
// S3 extensions.
ErrContentSHA256Mismatch: {
Code: "XAmzContentSHA256Mismatch",
Description: "The provided 'x-amz-content-sha256' header does not match what was computed.",
HTTPStatusCode: http.StatusBadRequest,
},

/// MinIO extensions.
// MinIO extensions.
ErrStorageFull: {
Code: "XMinioStorageFull",
Description: "Storage backend has reached its minimum free disk threshold. Please delete a few objects to proceed.",
@@ -1362,15 +1386,15 @@ var errorCodes = errorCodeMap{
},
ErrBackendDown: {
Code: "XMinioBackendDown",
Description: "Object storage backend is unreachable",
HTTPStatusCode: http.StatusServiceUnavailable,
Description: "Remote backend is unreachable",
HTTPStatusCode: http.StatusBadRequest,
},
ErrIncorrectContinuationToken: {
Code: "InvalidArgument",
Description: "The continuation token provided is incorrect",
HTTPStatusCode: http.StatusBadRequest,
},
//S3 Select API Errors
// S3 Select API Errors
ErrEmptyRequestBody: {
Code: "EmptyRequestBody",
Description: "Request body cannot be empty.",
@@ -1894,6 +1918,9 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrIncompatibleEncryptionMethod
case errKMSNotConfigured:
apiErr = ErrKMSNotConfigured
case errKMSKeyNotFound:
apiErr = ErrKMSKeyNotFoundException

case context.Canceled, context.DeadlineExceeded:
apiErr = ErrOperationTimedOut
case errDiskNotFound:
@@ -1956,6 +1983,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrNoSuchKey
case MethodNotAllowed:
apiErr = ErrMethodNotAllowed
case ObjectLocked:
apiErr = ErrObjectLocked
case InvalidVersionID:
apiErr = ErrInvalidVersionID
case VersionNotFound:
@@ -2072,6 +2101,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
default:
var ie, iw int
// This work-around is to handle the issue golang/go#30648
//nolint:gocritic
if _, ferr := fmt.Fscanf(strings.NewReader(err.Error()),
"request declared a Content-Length of %d but only wrote %d bytes",
&ie, &iw); ferr != nil {
@@ -2104,7 +2134,7 @@ func toAPIError(ctx context.Context, err error) APIError {
return noError
}

var apiErr = errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
apiErr := errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
e, ok := err.(dns.ErrInvalidBucketName)
if ok {
code := toAPIErrorCode(ctx, e)
@@ -2127,6 +2157,11 @@ func toAPIError(ctx context.Context, err error) APIError {
}
}

if apiErr.Code == "XMinioBackendDown" {
apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
return apiErr
}

if apiErr.Code == "InternalError" {
// If we see an internal error try to interpret
// any underlying errors if possible depending on
@@ -2212,7 +2247,6 @@ func toAPIError(ctx context.Context, err error) APIError {
// since S3 only sends one Error XML response.
if len(e.Errors) >= 1 {
apiErr.Code = e.Errors[0].Reason

}
case azblob.StorageError:
apiErr = APIError{
@@ -2222,6 +2256,7 @@ func toAPIError(ctx context.Context, err error) APIError {
}
// Add more Gateway SDKs here if any in future.
default:
//nolint:gocritic
if errors.Is(err, errMalformedEncoding) {
apiErr = APIError{
Code: "BadRequest",
@@ -2271,7 +2306,7 @@ func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
Resource: resource,
Region: globalServerRegion,
Region: globalSite.Region,
RequestID: requestID,
HostID: hostID,
}

@@ -52,7 +52,7 @@ func setCommonHeaders(w http.ResponseWriter) {

// Set `x-amz-bucket-region` only if region is set on the server
// by default minio uses an empty region.
if region := globalServerRegion; region != "" {
if region := globalSite.Region; region != "" {
w.Header().Set(xhttp.AmzBucketRegion, region)
}
w.Header().Set(xhttp.AcceptRanges, "bytes")
@@ -126,6 +126,11 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp

// Set all other user defined metadata.
for k, v := range objInfo.UserDefined {
// Empty values for object lock and retention can be skipped.
if v == "" && equals(k, xhttp.AmzObjectLockMode, xhttp.AmzObjectLockRetainUntilDate) {
continue
}

if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
// Do not need to send any internal metadata
// values to client.

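The region header change above in isolation; a sketch that is easy to unit test, with globalSite.Region replaced by a plain parameter and the package name illustrative:

package headers

import "net/http"

// setRegionHeader mirrors setCommonHeaders above: x-amz-bucket-region is
// only emitted when the server actually has a region configured.
func setRegionHeader(w http.ResponseWriter, region string) {
	if region != "" {
		w.Header().Set("x-amz-bucket-region", region)
	}
	w.Header().Set("Accept-Ranges", "bytes")
}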
@@ -23,7 +23,7 @@ import (

func TestNewRequestID(t *testing.T) {
// Ensure that it returns an alphanumeric result of length 16.
var id = mustGetRequestID(UTCNow())
id := mustGetRequestID(UTCNow())

if len(id) != 16 {
t.Fail()

@@ -39,7 +39,7 @@ const (
// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
maxObjectList = 1000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
maxDeleteList = 10000 // Limit number of objects deleted in a delete call.
maxDeleteList = 1000 // Limit number of objects deleted in a delete call.
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
)
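maxDeleteList drops from 10000 to 1000 here, which lines up with the 1000-key ceiling of the S3 DeleteObjects API; clients deleting more keys must batch. A sketch of such client-side batching (package and helper names are illustrative):

package client

// batchKeys splits keys into chunks no larger than max (1000 for DeleteObjects).
func batchKeys(keys []string, max int) [][]string {
	if len(keys) == 0 {
		return nil
	}
	var batches [][]string
	for len(keys) > max {
		batches = append(batches, keys[:max])
		keys = keys[max:]
	}
	return append(batches, keys)
}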
@@ -268,7 +268,6 @@ type StringMap map[string]string

// MarshalXML - StringMap marshals into XML.
func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {

tokens := []xml.Token{start}

for key, value := range s {
@@ -417,8 +416,8 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
// serialized to match XML and JSON API spec output.
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
listbuckets := make([]Bucket, 0, len(buckets))
var data = ListBucketsResponse{}
var owner = Owner{
data := ListBucketsResponse{}
owner := Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
@@ -439,14 +438,14 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
versions := make([]ObjectVersion, 0, len(resp.Objects))
var owner = Owner{
owner := Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
var data = ListVersionsResponse{}
data := ListVersionsResponse{}

for _, object := range resp.Objects {
var content = ObjectVersion{}
content := ObjectVersion{}
if object.Name == "" {
continue
}
@@ -486,7 +485,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim

prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem := CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
prefixes = append(prefixes, prefixItem)
}
@@ -497,14 +496,14 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
// generates an ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
contents := make([]Object, 0, len(resp.Objects))
var owner = Owner{
owner := Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
var data = ListObjectsResponse{}
data := ListObjectsResponse{}

for _, object := range resp.Objects {
var content = Object{}
content := Object{}
if object.Name == "" {
continue
}
@@ -535,7 +534,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy

prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem := CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
prefixes = append(prefixes, prefixItem)
}
@@ -546,14 +545,14 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
// generates an ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
contents := make([]Object, 0, len(objects))
var owner = Owner{
owner := Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
var data = ListObjectsV2Response{}
data := ListObjectsV2Response{}

for _, object := range objects {
var content = Object{}
content := Object{}
if object.Name == "" {
continue
}
@@ -608,7 +607,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,

commonPrefixes := make([]CommonPrefix, 0, len(prefixes))
for _, prefix := range prefixes {
var prefixItem = CommonPrefix{}
prefixItem := CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
commonPrefixes = append(commonPrefixes, prefixItem)
}
@@ -724,9 +723,6 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, err
if !quiet {
deleteResp.DeletedObjects = deletedObjects
}
if len(errs) == len(deletedObjects) {
deleteResp.DeletedObjects = nil
}
deleteResp.Errors = errs
return deleteResp
}
@@ -740,7 +736,6 @@ func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType
w.WriteHeader(statusCode)
if response != nil {
w.Write(response)
w.(http.Flusher).Flush()
}
}

@@ -791,9 +786,9 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
case "InvalidRegion":
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalServerRegion)
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region)
case "AuthorizationHeaderMalformed":
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalServerRegion)
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
}

// Generate error response.
@@ -825,8 +820,8 @@ func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIE
// but accepts the error message directly (this allows messages to be
// dynamically generated.)
func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError,
errBody string, reqURL *url.URL) {

errBody string, reqURL *url.URL,
) {
reqInfo := logger.GetReqInfo(ctx)
errorResponse := APIErrorResponse{
Code: err.Code,

@@ -24,6 +24,7 @@ import (

"github.com/gorilla/mux"
"github.com/klauspost/compress/gzhttp"
"github.com/minio/console/restapi"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/wildcard"
@@ -42,6 +43,18 @@ func setHTTPServer(h *xhttp.Server) {
globalObjLayerMutex.Unlock()
}

func newConsoleServerFn() *restapi.Server {
globalObjLayerMutex.RLock()
defer globalObjLayerMutex.RUnlock()
return globalConsoleSrv
}

func setConsoleSrv(srv *restapi.Server) {
globalObjLayerMutex.Lock()
globalConsoleSrv = srv
globalObjLayerMutex.Unlock()
}

func newObjectLayerFn() ObjectLayer {
globalObjLayerMutex.RLock()
defer globalObjLayerMutex.RUnlock()
@@ -274,7 +287,7 @@ func registerAPIRouter(router *mux.Router) {
collectAPIStats("getobjectlegalhold", maxClients(gz(httpTraceAll(api.GetObjectLegalHoldHandler))))).Queries("legal-hold", "")
// GetObject - note gzip compression is *not* added due to Range requests.
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobject", maxClients(httpTraceHdrs(api.GetObjectHandler))))
collectAPIStats("getobject", maxClients(gz(httpTraceHdrs(api.GetObjectHandler)))))
// CopyObject
router.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(
collectAPIStats("copyobject", maxClients(gz(httpTraceAll(api.CopyObjectHandler)))))
@@ -301,7 +314,8 @@ func registerAPIRouter(router *mux.Router) {
router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("restoreobject", maxClients(gz(httpTraceAll(api.PostRestoreObjectHandler))))).Queries("restore", "")

/// Bucket operations
// Bucket operations

// GetBucketLocation
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketlocation", maxClients(gz(httpTraceAll(api.GetBucketLocationHandler))))).Queries("location", "")
@@ -329,6 +343,9 @@ func registerAPIRouter(router *mux.Router) {
// ListenNotification
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listennotification", maxClients(gz(httpTraceAll(api.ListenNotificationHandler))))).Queries("events", "{events:.*}")
// ResetBucketReplicationStatus - MinIO extension API
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("resetbucketreplicationstatus", maxClients(gz(httpTraceAll(api.ResetBucketReplicationStatusHandler))))).Queries("replication-reset-status", "")

// Dummy Bucket Calls
// GetBucketACL -- this is a dummy call.
@@ -355,7 +372,7 @@ func registerAPIRouter(router *mux.Router) {
// GetBucketTaggingHandler
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbuckettagging", maxClients(gz(httpTraceAll(api.GetBucketTaggingHandler))))).Queries("tagging", "")
//DeleteBucketWebsiteHandler
// DeleteBucketWebsiteHandler
router.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketwebsite", maxClients(gz(httpTraceAll(api.DeleteBucketWebsiteHandler))))).Queries("website", "")
// DeleteBucketTaggingHandler
@@ -403,9 +420,10 @@ func registerAPIRouter(router *mux.Router) {
// PutBucketNotification
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketnotification", maxClients(gz(httpTraceAll(api.PutBucketNotificationHandler))))).Queries("notification", "")
// ResetBucketReplicationState - MinIO extension API
// ResetBucketReplicationStart - MinIO extension API
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("resetbucketreplicationstate", maxClients(gz(httpTraceAll(api.ResetBucketReplicationStateHandler))))).Queries("replication-reset", "")
collectAPIStats("resetbucketreplicationstart", maxClients(gz(httpTraceAll(api.ResetBucketReplicationStartHandler))))).Queries("replication-reset", "")

// PutBucket
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucket", maxClients(gz(httpTraceAll(api.PutBucketHandler)))))
@@ -452,7 +470,7 @@ func registerAPIRouter(router *mux.Router) {
collectAPIStats("listobjectsv1", maxClients(gz(httpTraceAll(api.ListObjectsV1Handler)))))
}

/// Root operation
// Root operation

// ListenNotification
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
@@ -470,7 +488,6 @@ func registerAPIRouter(router *mux.Router) {
// If none of the routes match add default error handler routes
apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))
apiRouter.MethodNotAllowedHandler = collectAPIStats("methodnotallowed", httpTraceAll(methodNotAllowedHandler("S3")))

}

// corsHandler handler for CORS (Cross Origin Resource Sharing)

@@ -44,7 +44,6 @@ func TestS3EncodeName(t *testing.T) {
|
||||
if testCase.expectedOutput != outputText {
|
||||
t.Errorf("Expected `%s`, got `%s`", testCase.expectedOutput, outputText)
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because one or more lines are too long
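The router hunks above all follow the same wrapping pattern: every S3 handler is layered with tracing (httpTraceAll/httpTraceHdrs), an optional gzip layer (gz), a concurrency limiter (maxClients), and a stats collector (collectAPIStats). A minimal, self-contained sketch of that decorator chaining — using hypothetical stand-ins for MinIO's unexported helpers, not the real implementations — looks like this. Note the diff's own comment that GetObject deliberately skips gz, because Range requests and transparently compressed responses do not mix.

package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
)

// withStats is an illustrative stand-in for collectAPIStats.
func withStats(name string, h http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fmt.Printf("api=%s path=%s\n", name, r.URL.Path) // record the call
		h(w, r)
	}
}

type gzipResponseWriter struct {
	http.ResponseWriter
	io.Writer
}

func (g *gzipResponseWriter) Write(b []byte) (int, error) { return g.Writer.Write(b) }

// withGzip is an illustrative stand-in for the gz wrapper.
func withGzip(h http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		gw := gzip.NewWriter(w)
		defer gw.Close()
		w.Header().Set("Content-Encoding", "gzip")
		h(&gzipResponseWriter{ResponseWriter: w, Writer: gw}, r)
	}
}

func main() {
	// Innermost handler first; wrappers apply outside-in, mirroring
	// collectAPIStats("getobject", maxClients(gz(httpTraceHdrs(handler)))).
	http.HandleFunc("/bucket/object", withStats("getobject", withGzip(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "object payload")
	})))
	http.ListenAndServe(":8080", nil)
}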
@@ -197,13 +197,7 @@ func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
return claims
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(token string) (map[string]interface{}, error) {
if token == "" {
claims := xjwt.NewMapClaims()
return claims.Map(), nil
}

func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{}, error) {
// JWT token for x-amz-security-token is signed with admin
// secret key, temporary credentials become invalid if
// server admin credentials change. This is done to ensure
@@ -212,9 +206,15 @@ func getClaimsFromToken(token string) (map[string]interface{}, error) {
// hijacking the policies. We need to make sure that this is
// based an admin credential such that token cannot be decoded
// on the client side and is treated like an opaque value.
claims, err := auth.ExtractClaims(token, globalActiveCred.SecretKey)
claims, err := auth.ExtractClaims(token, secret)
if err != nil {
return nil, errAuthentication
if subtle.ConstantTimeCompare([]byte(secret), []byte(globalActiveCred.SecretKey)) == 1 {
return nil, errAuthentication
}
claims, err = auth.ExtractClaims(token, globalActiveCred.SecretKey)
if err != nil {
return nil, errAuthentication
}
}

// If OPA is set, return without any further checks.
@@ -238,39 +238,53 @@ func getClaimsFromToken(token string) (map[string]interface{}, error) {
claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
}

// If LDAP claim key is set, return here.
if _, ok := claims.MapClaims[ldapUser]; ok {
return claims.Map(), nil
}

// Session token must have a policy, reject requests without policy
// claim.
_, pokOpenID := claims.MapClaims[iamPolicyClaimNameOpenID()]
_, pokSA := claims.MapClaims[iamPolicyClaimNameSA()]
if !pokOpenID && !pokSA {
return nil, errAuthentication
}

return claims.Map(), nil
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(token string) (map[string]interface{}, error) {
return getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
}

// Fetch claims in the security token returned by the client and validate the token.
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
token := getSessionToken(r)
if token != "" && cred.AccessKey == "" {
// x-amz-security-token is not allowed for anonymous access.
return nil, ErrNoAccessKey
}
if cred.IsServiceAccount() && token == "" {
token = cred.SessionToken
}
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {

if token == "" && cred.IsTemp() {
// Temporary credentials should always have x-amz-security-token
return nil, ErrInvalidToken
}
claims, err := getClaimsFromToken(token)
if err != nil {
return nil, toAPIErrorCode(r.Context(), err)

if token != "" && !cred.IsTemp() {
// x-amz-security-token should not present for static credentials.
return nil, ErrInvalidToken
}
return claims, ErrNone

if cred.IsTemp() && subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
// validate token for temporary credentials only.
return nil, ErrInvalidToken
}

secret := globalActiveCred.SecretKey
if cred.IsServiceAccount() {
token = cred.SessionToken
secret = cred.SecretKey
}

if token != "" {
claims, err := getClaimsFromTokenWithSecret(token, secret)
if err != nil {
return nil, toAPIErrorCode(r.Context(), err)
}
return claims, ErrNone
}

claims := xjwt.NewMapClaims()
return claims.Map(), ErrNone
}

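The new getClaimsFromTokenWithSecret/checkClaimsFromToken flow above compares secrets and session tokens with subtle.ConstantTimeCompare before deciding whether a fallback to the admin credential is even attempted. A small, hedged illustration of why that stdlib call is preferred over == for token and secret equality — it runs in time independent of where the inputs first differ, so the comparison leaks nothing through timing:

package main

import (
	"crypto/subtle"
	"fmt"
)

// equalTokens reports whether two secrets match without revealing,
// through timing, the position of the first differing byte.
func equalTokens(a, b string) bool {
	// ConstantTimeCompare returns 0 immediately for unequal lengths,
	// which is acceptable for this use.
	return subtle.ConstantTimeCompare([]byte(a), []byte(b)) == 1
}

func main() {
	fmt.Println(equalTokens("s3cret", "s3cret")) // true
	fmt.Println(equalTokens("s3cret", "s3creT")) // false
}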
// Check request auth type verifies the incoming http request
@@ -299,7 +313,7 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeSigned, authTypePresigned:
region := globalServerRegion
region := globalSite.Region
switch action {
case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
region = ""
@@ -487,6 +501,26 @@ func setAuthHandler(h http.Handler) http.Handler {
// handler for validating incoming authorization headers.
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
aType := getRequestAuthType(r)
if aType == authTypeSigned || aType == authTypeSignedV2 || aType == authTypeStreamingSigned {
// Verify if date headers are set, if not reject the request
amzDate, errCode := parseAmzDateHeader(r)
if errCode != ErrNone {
// All our internal APIs are sensitive towards Date
// header, for all requests where Date header is not
// present we will reject such clients.
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(errCode), r.URL)
atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
return
}
// Verify if the request date header is shifted by less than globalMaxSkewTime parameter in the past
// or in the future, reject request otherwise.
curTime := UTCNow()
if curTime.Sub(amzDate) > globalMaxSkewTime || amzDate.Sub(curTime) > globalMaxSkewTime {
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrRequestTimeTooSkewed), r.URL)
atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
return
}
}
if isSupportedS3AuthType(aType) || aType == authTypeJWT || aType == authTypeSTS {
h.ServeHTTP(w, r)
return
@@ -509,7 +543,7 @@ func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool,
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypePresigned, authTypeSigned:
region := globalServerRegion
region := globalSite.Region
if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
return cred, owner, s3Err
}
@@ -576,7 +610,7 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
case authTypeSignedV2, authTypePresignedV2:
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
region := globalServerRegion
region := globalSite.Region
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {

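The setAuthHandler hunk above rejects any signed request whose date header differs from server time by more than globalMaxSkewTime in either direction. The two-sided comparison reduces to a single absolute-difference test; a minimal sketch follows (the 15-minute window is an assumption for illustration, mirroring common S3 defaults, not a value taken from this diff):

package main

import (
	"fmt"
	"time"
)

const maxSkew = 15 * time.Minute // assumed window for illustration

// withinSkew is true when amzDate is neither too far in the past
// nor too far in the future relative to now.
func withinSkew(now, amzDate time.Time) bool {
	diff := now.Sub(amzDate)
	if diff < 0 {
		diff = -diff
	}
	return diff <= maxSkew
}

func main() {
	now := time.Now().UTC()
	fmt.Println(withinSkew(now, now.Add(-5*time.Minute))) // true
	fmt.Println(withinSkew(now, now.Add(20*time.Minute))) // false: client clock runs ahead
}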
@@ -362,11 +362,14 @@ func TestIsReqAuthenticated(t *testing.T) {
t.Fatalf("unable initialize config file, %s", err)
}

newAllSubsystems()
initAllSubsystems()

initAllSubsystems(context.Background(), objLayer)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

globalIAMSys.InitStore(objLayer)
initConfigSubsystem(ctx, objLayer)

globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)

creds, err := auth.CreateCredentials("myuser", "mypassword")
if err != nil {
@@ -392,10 +395,9 @@ func TestIsReqAuthenticated(t *testing.T) {
{mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrNone},
}

ctx := context.Background()
// Validates all testcases.
for i, testCase := range testCases {
s3Error := isReqAuthenticated(ctx, testCase.req, globalServerRegion, serviceS3)
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region, serviceS3)
if s3Error != testCase.s3Error {
if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
@@ -433,15 +435,15 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
}
ctx := context.Background()
for i, testCase := range testCases {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, iampolicy.AllAdminActions, globalServerRegion); s3Error != testCase.ErrCode {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, iampolicy.AllAdminActions, globalSite.Region); s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
}
}
}

func TestValidateAdminSignature(t *testing.T) {

ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

objLayer, fsDir, err := prepareFS()
if err != nil {
@@ -453,11 +455,11 @@ func TestValidateAdminSignature(t *testing.T) {
t.Fatalf("unable initialize config file, %s", err)
}

newAllSubsystems()
initAllSubsystems()

initAllSubsystems(context.Background(), objLayer)
initConfigSubsystem(ctx, objLayer)

globalIAMSys.InitStore(objLayer)
globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)

creds, err := auth.CreateCredentials("admin", "mypassword")
if err != nil {

@@ -115,7 +115,6 @@ func newHealRoutine() *healRoutine {
tasks: make(chan healTask),
workers: workers,
}

}

// healDiskFormat - heals format.json, return value indicates if a

@@ -18,12 +18,12 @@
package cmd

import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"sort"
"strings"
"sync"
@@ -263,7 +263,7 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)

if drivesToHeal := globalBackgroundHealState.healDriveCount(); drivesToHeal > 0 {
logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content...",
logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content - use 'mc admin heal alias/ --verbose' to check the status",
drivesToHeal, defaultMonitorNewDiskInterval))

// Heal any disk format and metadata early, if possible.
@@ -277,35 +277,26 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
}
}

if err := bgSeq.healDiskMeta(objAPI); err != nil {
if newObjectLayerFn() != nil {
// log only in situations, when object layer
// has fully initialized.
logger.LogIf(bgSeq.ctx, err)
}
}

go monitorLocalDisksAndHeal(ctx, z, bgSeq)
}

func getLocalDisksToHeal() (disksToHeal Endpoints) {
for _, ep := range globalEndpoints {
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
disk, _, err := connectEndpoint(endpoint)
if errors.Is(err, errUnformattedDisk) {
disksToHeal = append(disksToHeal, endpoint)
} else if err == nil && disk != nil && disk.Healing() != nil {
disksToHeal = append(disksToHeal, disk.Endpoint())
}
for _, disk := range globalLocalDrives {
_, err := disk.GetDiskID()
if errors.Is(err, errUnformattedDisk) {
disksToHeal = append(disksToHeal, disk.Endpoint())
continue
}
if disk.Healing() != nil {
disksToHeal = append(disksToHeal, disk.Endpoint())
}
}
if len(disksToHeal) == globalEndpoints.NEndpoints() {
// When all disks == all command line endpoints
// this is a fresh setup, no need to trigger healing.
return Endpoints{}
}
return disksToHeal

}

// monitorLocalDisksAndHeal - ensures that detected new disks are healed
@@ -334,7 +325,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
// Ensure that reformatting disks is finished
bgSeq.queueHealTask(healSource{bucket: nopHeal}, madmin.HealItemMetadata)

logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal - 'mc admin heal alias/ --verbose' to check the status.",
len(healDisks)))

erasureSetInPoolDisksToHeal = make([]map[int][]StorageAPI, len(z.serverPools))
@@ -375,15 +366,13 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq

buckets, _ := z.ListBuckets(ctx)

buckets = append(buckets, BucketInfo{
Name: pathJoin(minioMetaBucket, minioConfigPrefix),
})

// Buckets data are dispersed in multiple zones/sets, make
// sure to heal all bucket metadata configuration.
buckets = append(buckets, []BucketInfo{
{Name: pathJoin(minioMetaBucket, bucketMetaPrefix)},
}...)
buckets = append(buckets, BucketInfo{
Name: pathJoin(minioMetaBucket, minioConfigPrefix),
}, BucketInfo{
Name: pathJoin(minioMetaBucket, bucketMetaPrefix),
})

// Heal latest buckets first.
sort.Slice(buckets, func(i, j int) bool {
@@ -407,12 +396,15 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
go func(setIndex int, disks []StorageAPI) {
defer wg.Done()
for _, disk := range disks {
logger.Info("Healing disk '%v' on %s pool", disk, humanize.Ordinal(i+1))
if serverDebugLog {
logger.Info("Healing disk '%v' on %s pool", disk, humanize.Ordinal(i+1))
}

// So someone changed the drives underneath, healing tracker missing.
tracker, err := loadHealingTracker(ctx, disk)
if err != nil {
logger.Info("Healing tracker missing on '%s', disk was swapped again on %s pool", disk, humanize.Ordinal(i+1))
logger.LogIf(ctx, fmt.Errorf("Healing tracker missing on '%s', disk was swapped again on %s pool: %w",
disk, humanize.Ordinal(i+1), err))
tracker = newHealingTracker(disk)
}

@@ -434,16 +426,19 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
return
}

err = z.serverPools[i].sets[setIndex].healErasureSet(ctx, buckets, tracker)
err = z.serverPools[i].sets[setIndex].healErasureSet(ctx, tracker.QueuedBuckets, tracker)
if err != nil {
logger.LogIf(ctx, err)
continue
}

logger.Info("Healing disk '%s' on %s pool complete", disk, humanize.Ordinal(i+1))
var buf bytes.Buffer
tracker.printTo(&buf)
logger.Info("Summary:\n%s", buf.String())
if serverDebugLog {
logger.Info("Healing disk '%s' on %s pool, %s set complete", disk,
humanize.Ordinal(i+1), humanize.Ordinal(setIndex+1))
logger.Info("Summary:\n")
tracker.printTo(os.Stdout)
logger.Info("\n")
}
logger.LogIf(ctx, tracker.delete(ctx))

// Only upon success pop the healed disk.

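A detail worth calling out in getLocalDisksToHeal above: when every local drive turns out to need healing, the code treats the cluster as a fresh setup and returns an empty list rather than queueing a full-cluster heal. A reduced, hedged sketch of that guard (names are illustrative, not MinIO's):

package main

import "fmt"

// disksToHealOrNone returns the drives flagged for healing, except when
// every configured endpoint is flagged - that pattern means a brand new
// deployment, where formatting (not healing) is the right action.
func disksToHealOrNone(flagged []string, totalEndpoints int) []string {
	if len(flagged) == totalEndpoints {
		return nil // fresh setup, nothing to heal
	}
	return flagged
}

func main() {
	fmt.Println(disksToHealOrNone([]string{"d1", "d2"}, 4))             // [d1 d2]
	fmt.Println(disksToHealOrNone([]string{"d1", "d2", "d3", "d4"}, 4)) // []
}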
@@ -119,8 +119,10 @@ func newBitrotReader(disk StorageAPI, data []byte, bucket string, filePath strin
// Close all the readers.
func closeBitrotReaders(rs []io.ReaderAt) {
for _, r := range rs {
if br, ok := r.(io.Closer); ok {
br.Close()
if r != nil {
if br, ok := r.(io.Closer); ok {
br.Close()
}
}
}
}
@@ -128,8 +130,10 @@ func closeBitrotReaders(rs []io.ReaderAt) {
// Close all the writers.
func closeBitrotWriters(ws []io.Writer) {
for _, w := range ws {
if bw, ok := w.(io.Closer); ok {
bw.Close()
if w != nil {
if bw, ok := w.(io.Closer); ok {
bw.Close()
}
}
}
}
@@ -212,7 +216,7 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
// bitrotSelfTest tries to catch any issue in the bitrot implementation
// early instead of silently corrupting data.
func bitrotSelfTest() {
var checksums = map[BitrotAlgorithm]string{
checksums := map[BitrotAlgorithm]string{
SHA256: "a7677ff19e0182e4d52e3a3db727804abc82a5818749336369552e54b838b004",
BLAKE2b512: "e519b7d84b1c3c917985f544773a35cf265dcab10948be3550320d156bab612124a5ae2ae5a8c73c0eea360f68b0e28136f26e858756dbfe7375a7389f26c669",
HighwayHash256: "39c0407ed3f01b18d22c85db4aeff11e060ca5f43131b0126731ca197cd42313",

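The nil guards added to closeBitrotReaders/closeBitrotWriters above are defensive: the slices can legitimately hold nil placeholders for offline disks, and the extra check makes the skip explicit before any Close is attempted. A compact, self-contained demonstration of the close-all pattern under those assumptions:

package main

import (
	"fmt"
	"io"
	"strings"
)

// closeAll closes every non-nil element that happens to implement
// io.Closer, skipping nil placeholders left for offline disks.
func closeAll(rs []io.Reader) {
	for _, r := range rs {
		if r != nil {
			if c, ok := r.(io.Closer); ok {
				c.Close()
			}
		}
	}
}

func main() {
	rs := []io.Reader{nil, io.NopCloser(strings.NewReader("data"))}
	closeAll(rs) // safe: the nil entry is skipped, the closer is closed
	fmt.Println("closed without panic")
}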
@@ -86,14 +86,29 @@ func (s1 ServerSystemConfig) Diff(s2 ServerSystemConfig) error {
}
}
if !reflect.DeepEqual(s1.MinioEnv, s2.MinioEnv) {
return fmt.Errorf("Expected same MINIO_ environment variables and values")
var missing []string
var mismatching []string
for k, v := range s1.MinioEnv {
ev, ok := s2.MinioEnv[k]
if !ok {
missing = append(missing, k)
} else if v != ev {
mismatching = append(mismatching, k)
}
}
if len(mismatching) > 0 {
return fmt.Errorf(`Expected same MINIO_ environment variables and values across all servers: Missing environment values: %s / Mismatch environment values: %s`, missing, mismatching)
}
return fmt.Errorf(`Expected same MINIO_ environment variables and values across all servers: Missing environment values: %s`, missing)
}
return nil
}

var skipEnvs = map[string]struct{}{
"MINIO_OPTS": {},
"MINIO_CERT_PASSWD": {},
"MINIO_OPTS": {},
"MINIO_CERT_PASSWD": {},
"MINIO_SERVER_DEBUG": {},
"MINIO_DSYNC_TRACE": {},
}

func getServerSystemCfg() ServerSystemConfig {
@@ -123,7 +138,6 @@ func (b *bootstrapRESTServer) VerifyHandler(w http.ResponseWriter, r *http.Reque
ctx := newContext(r, w, "VerifyHandler")
cfg := getServerSystemCfg()
logger.LogIf(ctx, json.NewEncoder(w).Encode(&cfg))
w.(http.Flusher).Flush()
}

// registerBootstrapRESTHandlers - register bootstrap rest router.
@@ -191,11 +205,11 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS
for onlineServers < len(clnts)/2 {
for _, clnt := range clnts {
if err := clnt.Verify(ctx, srcCfg); err != nil {
if isNetworkError(err) {
offlineEndpoints = append(offlineEndpoints, clnt.String())
continue
if !isNetworkError(err) {
logger.LogIf(ctx, fmt.Errorf("%s has incorrect configuration: %w", clnt.String(), err))
}
return fmt.Errorf("%s as has incorrect configuration: %w", clnt.String(), err)
offlineEndpoints = append(offlineEndpoints, clnt.String())
continue
}
onlineServers++
}
@@ -248,7 +262,7 @@ func newBootstrapRESTClient(endpoint Endpoint) *bootstrapRESTClient {
Path: bootstrapRESTPath,
}

restClient := rest.NewClient(serverURL, globalInternodeTransport, newAuthToken)
restClient := rest.NewClient(serverURL, globalInternodeTransport, newCachedAuthToken())
restClient.HealthCheckFn = nil

return &bootstrapRESTClient{endpoint: endpoint, restClient: restClient}
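The improved Diff above replaces a blunt reflect.DeepEqual failure with a report of exactly which MINIO_ environment variables are missing or mismatching across servers. The map walk generalizes to any pair of string maps; a standalone sketch:

package main

import "fmt"

// diffEnv returns keys of want that are absent from got, and keys
// present in both maps but bound to different values.
func diffEnv(want, got map[string]string) (missing, mismatching []string) {
	for k, v := range want {
		gv, ok := got[k]
		switch {
		case !ok:
			missing = append(missing, k)
		case gv != v:
			mismatching = append(mismatching, k)
		}
	}
	return missing, mismatching
}

func main() {
	a := map[string]string{"MINIO_REGION": "us-east-1", "MINIO_BROWSER": "on"}
	b := map[string]string{"MINIO_REGION": "us-west-2"}
	missing, mismatching := diffEnv(a, b)
	fmt.Println(missing, mismatching) // [MINIO_BROWSER] [MINIO_REGION]
}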
||||
@@ -20,12 +20,15 @@ package cmd
|
import (
"encoding/base64"
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"

"github.com/gorilla/mux"
"github.com/minio/kes"
"github.com/minio/madmin-go"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/bucket/policy"
)
@@ -84,6 +87,19 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
kmsKey := encConfig.KeyID()
if kmsKey != "" {
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
_, err := GlobalKMS.GenerateKey(kmsKey, kmsContext)
if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) {
writeErrorResponse(ctx, w, toAPIError(ctx, errKMSKeyNotFound), r.URL)
return
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}

configData, err := xml.Marshal(encConfig)
if err != nil {
@@ -92,7 +108,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
}

// Store the bucket encryption configuration in the object layer
if err = globalBucketMetadataSys.Update(bucket, bucketSSEConfig, configData); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -186,10 +202,19 @@ func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter
}

// Delete bucket encryption config from object layer
if err = globalBucketMetadataSys.Update(bucket, bucketSSEConfig, nil); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Call site replication hook.
//
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket,
SSEConfig: nil,
}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

writeSuccessNoContent(w)
}
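The PutBucketEncryptionHandler hunk above now test-drives the configured KMS key with a GenerateKey call before persisting an SSE configuration, so a mistyped key name fails fast with errKMSKeyNotFound instead of breaking object writes later. A hedged sketch of that validate-before-save shape, with a stand-in KMS interface (not MinIO's real client):

package main

import (
	"errors"
	"fmt"
)

var errKeyNotFound = errors.New("kms: key not found")

// KMS is an illustrative stand-in; only the probe behavior matters here.
type KMS interface {
	GenerateKey(keyID string) error
}

type fakeKMS struct{ keys map[string]bool }

func (f fakeKMS) GenerateKey(keyID string) error {
	if !f.keys[keyID] {
		return errKeyNotFound
	}
	return nil
}

// validateSSEKey probes the key before any configuration is stored.
func validateSSEKey(k KMS, keyID string) error {
	if keyID == "" {
		return nil // default key, nothing to probe
	}
	return k.GenerateKey(keyID)
}

func main() {
	k := fakeKMS{keys: map[string]bool{"bucket-key": true}}
	fmt.Println(validateSSEKey(k, "bucket-key"))  // <nil>
	fmt.Println(validateSSEKey(k, "no-such-key")) // kms: key not found
}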
||||
@@ -133,8 +133,6 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// Add/update buckets that are not registered with the DNS
bucketsToBeUpdatedSlice := bucketsToBeUpdated.ToSlice()
g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50)
ctx, cancel := g.WithCancelOnError(GlobalContext)
defer cancel()

for index := range bucketsToBeUpdatedSlice {
index := index
@@ -143,9 +141,12 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
}, index)
}

if err := g.WaitErr(); err != nil {
logger.LogIf(ctx, err)
return
ctx := GlobalContext
for _, err := range g.Wait() {
if err != nil {
logger.LogIf(ctx, err)
return
}
}

for _, bucket := range bucketsInConflict.ToSlice() {
@@ -210,7 +211,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
// Generate response.
encodedSuccessResponse := encodeResponse(LocationResponse{})
// Get current region.
region := globalServerRegion
region := globalSite.Region
if region != globalMinioDefaultRegion {
encodedSuccessResponse = encodeResponse(LocationResponse{
Location: region,
@@ -414,18 +415,23 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
const maxBodySize = 2 * 100000 * 1024

// Unmarshal list of keys to be deleted.
deleteObjects := &DeleteObjectsRequest{}
if err := xmlDecoder(r.Body, deleteObjects, maxBodySize); err != nil {
deleteObjectsReq := &DeleteObjectsRequest{}
if err := xmlDecoder(r.Body, deleteObjectsReq, maxBodySize); err != nil {
logger.LogIf(ctx, err, logger.Application)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

objects := make([]ObjectV, len(deleteObjectsReq.Objects))
// Convert object name delete objects if it has `/` in the beginning.
for i := range deleteObjects.Objects {
deleteObjects.Objects[i].ObjectName = trimLeadingSlash(deleteObjects.Objects[i].ObjectName)
for i := range deleteObjectsReq.Objects {
deleteObjectsReq.Objects[i].ObjectName = trimLeadingSlash(deleteObjectsReq.Objects[i].ObjectName)
objects[i] = deleteObjectsReq.Objects[i].ObjectV
}

// Make sure to update context to print ObjectNames for multi objects.
ctx = updateReqContext(ctx, objects...)

// Call checkRequestAuthType to populate ReqInfo.AccessKey before GetBucketInfo()
// Ignore errors here to preserve the S3 error behavior of GetBucketInfo()
checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, "")
@@ -442,13 +448,13 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
deleteObjectsFn = api.CacheAPI().DeleteObjects
}

// Return Malformed XML as S3 spec if the list of objects is empty
if len(deleteObjects.Objects) == 0 {
// Return Malformed XML as S3 spec if the number of objects is empty
if len(deleteObjectsReq.Objects) == 0 || len(deleteObjectsReq.Objects) > maxDeleteList {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL)
return
}

var objectsToDelete = map[ObjectToDelete]int{}
objectsToDelete := map[ObjectToDelete]int{}
getObjectInfoFn := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfoFn = api.CacheAPI().GetObjectInfo
@@ -460,7 +466,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
goi ObjectInfo
gerr error
)
replicateDeletes := hasReplicationRules(ctx, bucket, deleteObjects.Objects)
replicateDeletes := hasReplicationRules(ctx, bucket, deleteObjectsReq.Objects)
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
hasLockEnabled = true
}
@@ -468,16 +474,23 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
versioned := globalBucketVersioningSys.Enabled(bucket)
suspended := globalBucketVersioningSys.Suspended(bucket)

dErrs := make([]DeleteError, len(deleteObjects.Objects))
oss := make([]*objSweeper, len(deleteObjects.Objects))
for index, object := range deleteObjects.Objects {
type deleteResult struct {
delInfo DeletedObject
errInfo DeleteError
}

deleteResults := make([]deleteResult, len(deleteObjectsReq.Objects))

oss := make([]*objSweeper, len(deleteObjectsReq.Objects))

for index, object := range deleteObjectsReq.Objects {
if apiErrCode := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); apiErrCode != ErrNone {
if apiErrCode == ErrSignatureDoesNotMatch || apiErrCode == ErrInvalidAccessKeyID {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL)
return
}
apiErr := errorCodes.ToAPIErr(apiErrCode)
dErrs[index] = DeleteError{
deleteResults[index].errInfo = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
@@ -489,7 +502,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
if _, err := uuid.Parse(object.VersionID); err != nil {
logger.LogIf(ctx, fmt.Errorf("invalid version-id specified %w", err))
apiErr := errorCodes.ToAPIErr(ErrNoSuchVersion)
dErrs[index] = DeleteError{
deleteResults[index].errInfo = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
@@ -519,8 +532,10 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

if replicateDeletes {
dsc = checkReplicateDelete(ctx, bucket, ObjectToDelete{
ObjectName: object.ObjectName,
VersionID: object.VersionID,
ObjectV: ObjectV{
ObjectName: object.ObjectName,
VersionID: object.VersionID,
},
}, goi, opts, gerr)
if dsc.ReplicateAny() {
if object.VersionID != "" {
@@ -535,7 +550,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
if object.VersionID != "" && hasLockEnabled {
if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, goi, gerr); apiErrCode != ErrNone {
apiErr := errorCodes.ToAPIErr(apiErrCode)
dErrs[index] = DeleteError{
deleteResults[index].errInfo = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
@@ -561,20 +576,25 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
return
}

// Disable timeouts and cancellation
ctx = bgContext(ctx)

deleteList := toNames(objectsToDelete)
dObjects, errs := deleteObjectsFn(ctx, bucket, deleteList, ObjectOptions{
Versioned: versioned,
VersionSuspended: suspended,
})
deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))

for i := range errs {
// DeleteMarkerVersionID is not used specifically to avoid
// lookup errors, since DeleteMarkerVersionID is only
// created during DeleteMarker creation when client didn't
// specify a versionID.
objToDel := ObjectToDelete{
ObjectName: dObjects[i].ObjectName,
VersionID: dObjects[i].VersionID,
ObjectV: ObjectV{
ObjectName: dObjects[i].ObjectName,
VersionID: dObjects[i].VersionID,
},
VersionPurgeStatus: dObjects[i].VersionPurgeStatus(),
VersionPurgeStatuses: dObjects[i].ReplicationState.VersionPurgeStatusInternal,
DeleteMarkerReplicationStatus: dObjects[i].ReplicationState.ReplicationStatusInternal,
@@ -585,11 +605,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
if replicateDeletes {
dObjects[i].ReplicationState = deleteList[i].ReplicationState()
}
deletedObjects[dindex] = dObjects[i]
deleteResults[dindex].delInfo = dObjects[i]
continue
}
apiErr := toAPIError(ctx, errs[i])
dErrs[dindex] = DeleteError{
deleteResults[dindex].errInfo = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: deleteList[i].ObjectName,
@@ -597,15 +617,18 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}
}

var deleteErrors []DeleteError
for _, dErr := range dErrs {
if dErr.Code != "" {
deleteErrors = append(deleteErrors, dErr)
// Generate response
deleteErrors := make([]DeleteError, 0, len(deleteObjectsReq.Objects))
deletedObjects := make([]DeletedObject, 0, len(deleteObjectsReq.Objects))
for _, deleteResult := range deleteResults {
if deleteResult.errInfo.Code != "" {
deleteErrors = append(deleteErrors, deleteResult.errInfo)
} else {
deletedObjects = append(deletedObjects, deleteResult.delInfo)
}
}

// Generate response
response := generateMultiDeleteResponse(deleteObjects.Quiet, deletedObjects, deleteErrors)
response := generateMultiDeleteResponse(deleteObjectsReq.Quiet, deletedObjects, deleteErrors)
encodedSuccessResponse := encodeResponse(response)

// Write success response.
@@ -902,7 +925,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
// S3 feature to replace ${filename} found in Key form field
// by the filename attribute passed in multipart
formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1))
formValues.Set("Key", strings.ReplaceAll(formValues.Get("Key"), "${filename}", fileName))
}
object := trimLeadingSlash(formValues.Get("Key"))

@@ -928,6 +951,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// explicit permissions for the user.
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.PutObjectAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
BucketName: bucket,
@@ -1199,7 +1223,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
return
}

writeSuccessResponseHeadersOnly(w)
writeResponse(w, http.StatusOK, nil, mimeXML)
}

// DeleteBucketHandler - Delete bucket
@@ -1246,6 +1270,17 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
rcfg, err := getReplicationConfig(ctx, bucket)
switch {
case err != nil:
if _, ok := err.(BucketReplicationConfigNotFound); !ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
case rcfg.HasActiveRules("", true):
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
}
}

@@ -1269,7 +1304,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
}
if globalDNSConfig != nil {
if err2 := globalDNSConfig.Put(bucket); err2 != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to restore bucket DNS entry %w, pl1ease fix it manually", err2))
logger.LogIf(ctx, fmt.Errorf("Unable to restore bucket DNS entry %w, please fix it manually", err2))
}
}
writeErrorResponse(ctx, w, apiErr, r.URL)
@@ -1277,7 +1312,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
}

globalNotificationSys.DeleteBucketMetadata(ctx, bucket)

globalReplicationPool.deleteResyncMetadata(ctx, bucket)
// Call site replication hook.
if err := globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -1344,7 +1379,7 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
return
}

if err = globalBucketMetadataSys.Update(bucket, objectLockConfig, configData); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1449,7 +1484,7 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
return
}

if err = globalBucketMetadataSys.Update(bucket, bucketTaggingConfig, configData); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1531,7 +1566,7 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r
return
}

if err := globalBucketMetadataSys.Update(bucket, bucketTaggingConfig, nil); err != nil {
if err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1575,7 +1610,10 @@ func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWr
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if globalSiteReplicationSys.isEnabled() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationDenyEditError), r.URL)
return
}
if versioned := globalBucketVersioningSys.Enabled(bucket); !versioned {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationNeedsVersioningError), r.URL)
return
@@ -1602,7 +1640,7 @@ func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWr
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketReplicationConfig, configData); err != nil {
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketReplicationConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1677,7 +1715,11 @@ func (api objectAPIHandlers) DeleteBucketReplicationConfigHandler(w http.Respons
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err := globalBucketMetadataSys.Update(bucket, bucketReplicationConfig, nil); err != nil {
if globalSiteReplicationSys.isEnabled() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationDenyEditError), r.URL)
return
}
if err := globalBucketMetadataSys.Update(ctx, bucket, bucketReplicationConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1720,21 +1762,22 @@ func (api objectAPIHandlers) GetBucketReplicationMetricsHandler(w http.ResponseW
usageInfo = dataUsageInfo.BucketsUsage[bucket]
}

bucketReplStats := getLatestReplicationStats(bucket, usageInfo)
jsonData, err := json.Marshal(bucketReplStats)
if err != nil {
w.Header().Set(xhttp.ContentType, string(mimeJSON))

enc := json.NewEncoder(w)
if err = enc.Encode(getLatestReplicationStats(bucket, usageInfo)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, jsonData)
}

// ResetBucketReplicationStateHandler - starts a replication reset for all objects in a bucket which
// ResetBucketReplicationStartHandler - starts a replication reset for all objects in a bucket which
// qualify for replication and re-sync the object(s) to target, provided ExistingObjectReplication is
// enabled for the qualifying rule. This API is a MinIO only extension provided for situations where
// remote target is entirely lost,and previously replicated objects need to be re-synced.
func (api objectAPIHandlers) ResetBucketReplicationStateHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ResetBucketReplicationState")
// remote target is entirely lost,and previously replicated objects need to be re-synced. If resync is
// already in progress it returns an error
func (api objectAPIHandlers) ResetBucketReplicationStartHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ResetBucketReplicationStart")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
@@ -1758,6 +1801,7 @@ func (api objectAPIHandlers) ResetBucketReplicationStateHandler(w http.ResponseW
}), r.URL)
}
}
resetBeforeDate := UTCNow().AddDate(0, 0, -1*int(days/24))

objectAPI := api.ObjectAPI()
if objectAPI == nil {
@@ -1788,12 +1832,13 @@ func (api objectAPIHandlers) ResetBucketReplicationStateHandler(w http.ResponseW
tgtArns := config.FilterTargetArns(
replication.ObjectOpts{
OpType: replication.ResyncReplicationType,
TargetArn: arn})
TargetArn: arn,
})

if len(tgtArns) == 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: fmt.Errorf("Remote target ARN %s missing/not eligible for replication resync", arn),
Err: fmt.Errorf("Remote target ARN %s missing or ineligible for replication resync", arn),
}), r.URL)
return
}
@@ -1817,22 +1862,90 @@ func (api objectAPIHandlers) ResetBucketReplicationStateHandler(w http.ResponseW
default:
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
}
}
if err := startReplicationResync(ctx, bucket, arn, resetID, resetBeforeDate, objectAPI); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: err,
}), r.URL)
return
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
tgtBytes, err := json.Marshal(&targets)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketTargetsFile, tgtBytes); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

data, err := json.Marshal(rinfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, data)
}

// ResetBucketReplicationStatusHandler - returns the status of replication reset.
// This API is a MinIO only extension
func (api objectAPIHandlers) ResetBucketReplicationStatusHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ResetBucketReplicationStatus")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
arn := r.URL.Query().Get("arn")
var err error

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.ResetBucketReplicationStateAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}

// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

globalReplicationPool.resyncState.RLock()
brs, ok := globalReplicationPool.resyncState.statusMap[bucket]
if !ok {
brs, err = loadBucketResyncMetadata(ctx, bucket, objectAPI)
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: fmt.Errorf("No replication resync status available for %s", arn),
}), r.URL)
}
return
}

var rinfo ResyncTargetsInfo
for tarn, st := range brs.TargetsMap {
if arn != "" && tarn != arn {
continue
}
rinfo.Targets = append(rinfo.Targets, ResyncTarget{
Arn: tarn,
ResetID: st.ResyncID,
StartTime: st.StartTime,
EndTime: st.EndTime,
ResyncStatus: st.ResyncStatus.String(),
ReplicatedSize: st.ReplicatedSize,
ReplicatedCount: st.ReplicatedCount,
FailedSize: st.FailedSize,
FailedCount: st.FailedCount,
Bucket: st.Bucket,
Object: st.Object,
})
}
globalReplicationPool.resyncState.RUnlock()
data, err := json.Marshal(rinfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

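For context on the DeleteMultipleObjectsHandler changes above: the handler decodes an S3 Delete XML body, which after this change must contain between 1 and maxDeleteList keys. A hedged sketch of that wire shape, built with stand-in types (field names mirror the public S3 format, not MinIO's internal structs):

package main

import (
	"encoding/xml"
	"fmt"
)

// deleteRequest mirrors the S3 Delete request the handler decodes;
// the type and field names here are illustrative.
type deleteRequest struct {
	XMLName xml.Name      `xml:"Delete"`
	Quiet   bool          `xml:"Quiet"`
	Objects []objectToDel `xml:"Object"`
}

type objectToDel struct {
	Key       string `xml:"Key"`
	VersionID string `xml:"VersionId,omitempty"`
}

func main() {
	req := deleteRequest{Quiet: false, Objects: []objectToDel{
		{Key: "public/object"},
		{Key: "private/object"},
	}}
	out, _ := xml.MarshalIndent(req, "", "  ")
	fmt.Println(string(out)) // the <Delete>...</Delete> body the handler parses
}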
@@ -20,6 +20,7 @@ package cmd
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
@@ -80,8 +81,8 @@ func TestGetBucketLocationHandler(t *testing.T) {
|
||||
}
|
||||
|
||||
func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
|
||||
credentials auth.Credentials, t *testing.T) {
|
||||
|
||||
credentials auth.Credentials, t *testing.T,
|
||||
) {
|
||||
// test cases with sample input and expected output.
|
||||
testCases := []struct {
|
||||
bucketName string
|
||||
@@ -162,7 +163,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
|
||||
recV2 := httptest.NewRecorder()
|
||||
// construct HTTP request for PUT bucket policy endpoint.
|
||||
reqV2, err := newTestSignedRequestV2(http.MethodGet, getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
|
||||
}
|
||||
@@ -209,7 +209,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
|
||||
|
||||
nilBucket := "dummy-bucket"
|
||||
nilReq, err := newTestRequest(http.MethodGet, getBucketLocationURL("", nilBucket), 0, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
|
||||
}
|
||||
@@ -224,8 +223,8 @@ func TestHeadBucketHandler(t *testing.T) {
|
||||
}
|
||||
|
||||
func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
|
||||
credentials auth.Credentials, t *testing.T) {
|
||||
|
||||
credentials auth.Credentials, t *testing.T,
|
||||
) {
|
||||
// test cases with sample input and expected output.
|
||||
testCases := []struct {
|
||||
bucketName string
|
||||
@@ -281,7 +280,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
|
||||
recV2 := httptest.NewRecorder()
|
||||
// construct HTTP request for PUT bucket policy endpoint.
|
||||
reqV2, err := newTestSignedRequestV2(http.MethodHead, getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
|
||||
}
|
||||
@@ -296,7 +294,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
|
||||
|
||||
// Test for Anonymous/unsigned http request.
|
||||
anonReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", bucketName), 0, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
|
||||
instanceType, bucketName, err)
|
||||
@@ -314,7 +311,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
|
||||
|
||||
nilBucket := "dummy-bucket"
|
||||
nilReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", nilBucket), 0, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
|
||||
}
|
||||
@@ -330,8 +326,8 @@ func TestListMultipartUploadsHandler(t *testing.T) {
|
||||
|
||||
// testListMultipartUploadsHandler - Tests validate listing of multipart uploads.
|
||||
func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
|
||||
credentials auth.Credentials, t *testing.T) {
|
||||
|
||||
credentials auth.Credentials, t *testing.T,
|
||||
) {
|
||||
// Collection of non-exhaustive ListMultipartUploads test cases, valid errors
|
||||
// and success responses.
|
||||
testCases := []struct {
|
||||
@@ -551,7 +547,6 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
|
||||
testCases[6].uploadIDMarker, testCases[6].delimiter, testCases[6].maxUploads)
|
||||
|
||||
nilReq, err := newTestRequest(http.MethodGet, url, 0, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
|
||||
}
|
||||
@@ -567,8 +562,8 @@ func TestListBucketsHandler(t *testing.T) {
|
||||
|
||||
// testListBucketsHandler - Tests validate listing of buckets.
|
||||
func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
|
||||
credentials auth.Credentials, t *testing.T) {
|
||||
|
||||
credentials auth.Credentials, t *testing.T,
|
||||
) {
|
||||
testCases := []struct {
|
||||
bucketName string
|
||||
accessKey string
|
||||
@@ -614,7 +609,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
|
||||
|
||||
// verify response for V2 signed HTTP request.
|
||||
reqV2, err := newTestSignedRequestV2(http.MethodGet, getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
|
||||
}
|
||||
@@ -629,7 +623,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
|
||||
// Test for Anonymous/unsigned http request.
|
||||
// ListBucketsHandler doesn't support bucket policies, setting the policies shouldn't make a difference.
|
||||
anonReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("MinIO %s: Failed to create an anonymous request.", instanceType)
|
||||
}
|
||||
@@ -645,7 +638,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
|
||||
// The only aim is to generate an HTTP request in a way that the relevant/registered end point is evoked/called.
|
||||
|
||||
nilReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
|
||||
}
|
||||
@@ -656,12 +648,12 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
|
||||
|
||||
// Wrapper for calling DeleteMultipleObjects HTTP handler tests for both Erasure multiple disks and single node setup.
|
||||
func TestAPIDeleteMultipleObjectsHandler(t *testing.T) {
|
||||
ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects"})
|
||||
ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects", "PutBucketPolicy"})
|
||||
}
|
||||
|
||||
func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
|
||||
credentials auth.Credentials, t *testing.T) {
|
||||
|
||||
credentials auth.Credentials, t *testing.T,
|
||||
) {
|
||||
var err error
|
||||
|
||||
contentBytes := []byte("hello")
|
||||
@@ -680,10 +672,35 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
|
||||
objectNames = append(objectNames, objectName)
|
||||
}
|
||||
|
||||
for _, name := range []string{"private/object", "public/object"} {
|
||||
// Uploading the object with retention enabled
|
||||
_, err = obj.PutObject(GlobalContext, bucketName, name, mustGetPutObjReader(t, bytes.NewReader(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
|
||||
// if object upload fails stop the test.
|
||||
if err != nil {
|
||||
t.Fatalf("Put Object %s: Error uploading object: <ERROR> %v", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// The following block will create a bucket policy with delete object to 'public/*'. This is
|
||||
// to test a mixed response of a successful & failure while deleting objects in a single request
|
||||
policyBytes := []byte(fmt.Sprintf(`{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName))
|
||||
rec := httptest.NewRecorder()
|
||||
req, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", bucketName), int64(len(policyBytes)), bytes.NewReader(policyBytes),
|
||||
credentials.AccessKey, credentials.SecretKey, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", err)
|
||||
}
|
||||
apiRouter.ServeHTTP(rec, req)
|
||||
if rec.Code != http.StatusNoContent {
|
||||
t.Errorf("Expected the response status to be `%d`, but instead found `%d`", 200, rec.Code)
|
||||
}
|
||||
|
||||
getObjectToDeleteList := func(objectNames []string) (objectList []ObjectToDelete) {
|
||||
for _, objectName := range objectNames {
|
||||
objectList = append(objectList, ObjectToDelete{
|
||||
ObjectName: objectName,
|
||||
ObjectV: ObjectV{
|
||||
ObjectName: objectName,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -702,9 +719,21 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
|
||||
return deleteErrorList
|
||||
}
|
||||
|
||||
objects := []ObjectToDelete{}
|
||||
objects = append(objects, ObjectToDelete{
|
||||
ObjectV: ObjectV{
|
||||
ObjectName: "private/object",
|
||||
},
|
||||
})
|
||||
objects = append(objects, ObjectToDelete{
|
||||
ObjectV: ObjectV{
|
||||
ObjectName: "public/object",
|
||||
},
|
||||
})
|
||||
requestList := []DeleteObjectsRequest{
|
||||
{Quiet: false, Objects: getObjectToDeleteList(objectNames[:5])},
|
||||
{Quiet: true, Objects: getObjectToDeleteList(objectNames[5:])},
|
||||
{Quiet: false, Objects: objects},
|
||||
}
|
||||
|
||||
// generate multi objects delete response.
|
||||
@@ -741,6 +770,20 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
	anonResponse := generateMultiDeleteResponse(requestList[0].Quiet, nil, getDeleteErrorList(requestList[0].Objects))
	encodedAnonResponse := encodeResponse(anonResponse)

	anonRequestWithPartialPublicAccess := encodeResponse(requestList[2])
	anonResponseWithPartialPublicAccess := generateMultiDeleteResponse(requestList[2].Quiet,
		[]DeletedObject{
			{ObjectName: "public/object"},
		},
		[]DeleteError{
			{
				Code:    errorCodes[ErrAccessDenied].Code,
				Message: errorCodes[ErrAccessDenied].Description,
				Key:     "private/object",
			},
		})
	encodedAnonResponseWithPartialPublicAccess := encodeResponse(anonResponseWithPartialPublicAccess)

	testCases := []struct {
		bucket  string
		objects []byte
@@ -800,6 +843,17 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
			expectedContent:    encodedAnonResponse,
			expectedRespStatus: http.StatusOK,
		},
		// Test case - 6.
		// Anonymous user has access to some public folder, issue removing with
		// another private object as well
		{
			bucket:             bucketName,
			objects:            anonRequestWithPartialPublicAccess,
			accessKey:          "",
			secretKey:          "",
			expectedContent:    encodedAnonResponseWithPartialPublicAccess,
			expectedRespStatus: http.StatusOK,
		},
	}

	for i, testCase := range testCases {

@@ -91,7 +91,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
		return
	}

	if err = globalBucketMetadataSys.Update(bucket, bucketLifecycleConfig, configData); err != nil {
	if err = globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, configData); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
@@ -168,7 +168,7 @@ func (api objectAPIHandlers) DeleteBucketLifecycleHandler(w http.ResponseWriter,
		return
	}

	if err := globalBucketMetadataSys.Update(bucket, bucketLifecycleConfig, nil); err != nil {
	if err := globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, nil); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

@@ -150,8 +150,8 @@ func TestBucketLifecycle(t *testing.T) {
// Simple tests of bucket lifecycle: PUT, GET, DELETE.
// Tests are related and the order is important.
func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
	creds auth.Credentials, t *testing.T) {

	creds auth.Credentials, t *testing.T,
) {
	// test cases with sample input and expected output.
	testCases := []struct {
		method string
@@ -266,8 +266,8 @@ func testBucketLifecycle(obj ObjectLayer, instanceType, bucketName string, apiRo
		lifecycleResponse []byte
		errorResponse     APIErrorResponse
		shouldPass        bool
	}) {

	},
) {
	for i, testCase := range testCases {
		// initialize httptest Recorder, this records any mutations to response writer inside the handler.
		rec := httptest.NewRecorder()

@@ -81,40 +81,58 @@ type expiryTask struct {
}

type expiryState struct {
	once     sync.Once
	expiryCh chan expiryTask
	once                sync.Once
	byDaysCh            chan expiryTask
	byNewerNoncurrentCh chan newerNoncurrentTask
}

// PendingTasks returns the number of pending ILM expiry tasks.
func (es *expiryState) PendingTasks() int {
	return len(es.expiryCh)
	return len(es.byDaysCh) + len(es.byNewerNoncurrentCh)
}

func (es *expiryState) queueExpiryTask(oi ObjectInfo, restoredObject bool, rmVersion bool) {
// close closes work channels exactly once.
func (es *expiryState) close() {
	es.once.Do(func() {
		close(es.byDaysCh)
		close(es.byNewerNoncurrentCh)
	})
}

// enqueueByDays enqueues object versions expired by days for expiry.
func (es *expiryState) enqueueByDays(oi ObjectInfo, restoredObject bool, rmVersion bool) {
	select {
	case <-GlobalContext.Done():
		es.once.Do(func() {
			close(es.expiryCh)
		})
	case es.expiryCh <- expiryTask{objInfo: oi, versionExpiry: rmVersion, restoredObject: restoredObject}:
		es.close()
	case es.byDaysCh <- expiryTask{objInfo: oi, versionExpiry: rmVersion, restoredObject: restoredObject}:
	default:
	}
}

var (
	globalExpiryState *expiryState
)
// enqueueByNewerNoncurrent enqueues object versions expired by
// NewerNoncurrentVersions limit for expiry.
func (es *expiryState) enqueueByNewerNoncurrent(bucket string, versions []ObjectToDelete) {
	select {
	case <-GlobalContext.Done():
		es.close()
	case es.byNewerNoncurrentCh <- newerNoncurrentTask{bucket: bucket, versions: versions}:
	default:
	}
}

var globalExpiryState *expiryState

func newExpiryState() *expiryState {
	return &expiryState{
		expiryCh: make(chan expiryTask, 10000),
		byDaysCh:            make(chan expiryTask, 10000),
		byNewerNoncurrentCh: make(chan newerNoncurrentTask, 10000),
	}
}

func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) {
	globalExpiryState = newExpiryState()
	go func() {
		for t := range globalExpiryState.expiryCh {
		for t := range globalExpiryState.byDaysCh {
			if t.objInfo.TransitionedObject.Status != "" {
				applyExpiryOnTransitionedObject(ctx, objectAPI, t.objInfo, t.restoredObject)
			} else {
@@ -122,6 +140,18 @@ func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) {
			}
		}
	}()
	go func() {
		for t := range globalExpiryState.byNewerNoncurrentCh {
			deleteObjectVersions(ctx, objectAPI, t.bucket, t.versions)
		}
	}()
}

// newerNoncurrentTask encapsulates arguments required by worker to expire objects
// by NewerNoncurrentVersions
type newerNoncurrentTask struct {
	bucket   string
	versions []ObjectToDelete
}

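Note: the refactor above splits the single expiryCh into two work queues and funnels shutdown through sync.Once so close() stays safe no matter which enqueue path hits a cancelled context, while a select-with-default makes each enqueue non-blocking. The following is a minimal, self-contained sketch of that pattern only; the expirer and task names are hypothetical and not part of MinIO.

package main

import (
	"fmt"
	"sync"
)

type task struct{ name string }

// expirer mirrors the two-queue shape: one channel per expiry kind,
// both closed exactly once via sync.Once.
type expirer struct {
	once      sync.Once
	byDaysCh  chan task
	byNewerCh chan task
}

func (e *expirer) close() {
	e.once.Do(func() {
		close(e.byDaysCh)
		close(e.byNewerCh)
	})
}

// enqueue drops the task when the buffer is full instead of blocking,
// matching the select-with-default in the diff; the caller simply
// retries on the next scanner cycle.
func (e *expirer) enqueue(t task) bool {
	select {
	case e.byDaysCh <- t:
		return true
	default:
		return false
	}
}

func main() {
	e := &expirer{byDaysCh: make(chan task, 1), byNewerCh: make(chan task, 1)}
	fmt.Println(e.enqueue(task{"a"})) // true
	fmt.Println(e.enqueue(task{"b"})) // false: buffer of 1 is full
	e.close()
	e.close() // safe: sync.Once guards the double close
}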
type transitionState struct {
@@ -135,6 +165,9 @@ type transitionState struct {
	killCh chan struct{}

	activeTasks int32

	lastDayMu    sync.RWMutex
	lastDayStats map[string]*lastDayTierStats
}

func (t *transitionState) queueTransitionTask(oi ObjectInfo) {
@@ -148,9 +181,7 @@ func (t *transitionState) queueTransitionTask(oi ObjectInfo) {
	}
}

var (
	globalTransitionState *transitionState
)
var globalTransitionState *transitionState

func newTransitionState(ctx context.Context, objAPI ObjectLayer) *transitionState {
	return &transitionState{
@@ -158,6 +189,7 @@ func newTransitionState(ctx context.Context, objAPI ObjectLayer) *transitionStat
		ctx:          ctx,
		objAPI:       objAPI,
		killCh:       make(chan struct{}),
		lastDayStats: make(map[string]*lastDayTierStats),
	}
}

@@ -185,14 +217,47 @@ func (t *transitionState) worker(ctx context.Context, objectAPI ObjectLayer) {
				return
			}
			atomic.AddInt32(&t.activeTasks, 1)
			if err := transitionObject(ctx, objectAPI, oi); err != nil {
			var tier string
			var err error
			if tier, err = transitionObject(ctx, objectAPI, oi); err != nil {
				logger.LogIf(ctx, fmt.Errorf("Transition failed for %s/%s version:%s with %w", oi.Bucket, oi.Name, oi.VersionID, err))
			} else {
				ts := tierStats{
					TotalSize:   uint64(oi.Size),
					NumVersions: 1,
				}
				if oi.IsLatest {
					ts.NumObjects = 1
				}
				t.addLastDayStats(tier, ts)
			}
			atomic.AddInt32(&t.activeTasks, -1)

		}
	}
}

func (t *transitionState) addLastDayStats(tier string, ts tierStats) {
	t.lastDayMu.Lock()
	defer t.lastDayMu.Unlock()

	if _, ok := t.lastDayStats[tier]; !ok {
		t.lastDayStats[tier] = &lastDayTierStats{}
	}
	t.lastDayStats[tier].addStats(ts)
}

func (t *transitionState) getDailyAllTierStats() dailyAllTierStats {
	t.lastDayMu.RLock()
	defer t.lastDayMu.RUnlock()

	res := make(dailyAllTierStats, len(t.lastDayStats))
	for tier, st := range t.lastDayStats {
		res[tier] = st.clone()
	}
	return res
}

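Note: addLastDayStats/getDailyAllTierStats follow a write-lock-on-update, clone-on-read discipline so snapshots never alias the live map. A hedged stand-alone sketch of the same idea; statsRegistry and its fields are illustrative names, not MinIO types.

package main

import (
	"fmt"
	"sync"
)

type tierStats struct {
	TotalSize   uint64
	NumVersions int
	NumObjects  int
}

type statsRegistry struct {
	mu     sync.RWMutex
	byTier map[string]*tierStats
}

// add accumulates per-tier counters under the write lock.
func (s *statsRegistry) add(tier string, ts tierStats) {
	s.mu.Lock()
	defer s.mu.Unlock()
	cur, ok := s.byTier[tier]
	if !ok {
		cur = &tierStats{}
		s.byTier[tier] = cur
	}
	cur.TotalSize += ts.TotalSize
	cur.NumVersions += ts.NumVersions
	cur.NumObjects += ts.NumObjects
}

// snapshot copies values under a read lock so callers never observe
// concurrent mutation — the clone-on-read idea of getDailyAllTierStats.
func (s *statsRegistry) snapshot() map[string]tierStats {
	s.mu.RLock()
	defer s.mu.RUnlock()
	out := make(map[string]tierStats, len(s.byTier))
	for tier, st := range s.byTier {
		out[tier] = *st
	}
	return out
}

func main() {
	r := &statsRegistry{byTier: make(map[string]*tierStats)}
	r.add("GLACIER", tierStats{TotalSize: 1 << 20, NumVersions: 1, NumObjects: 1})
	fmt.Println(r.snapshot())
}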
// UpdateWorkers at the end of this function leaves n goroutines waiting for
// transition tasks
func (t *transitionState) UpdateWorkers(n int) {
@@ -337,15 +402,16 @@ func genTransitionObjName(bucket string) (string, error) {
// storage specified by the transition ARN, the metadata is left behind on source cluster and original content
// is moved to the transition tier. Note that in the case of encrypted objects, entire encrypted stream is moved
// to the transition tier without decrypting or re-encrypting.
func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo) error {
func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo) (string, error) {
	lc, err := globalLifecycleSys.Get(oi.Bucket)
	if err != nil {
		return err
		return "", err
	}
	tier := lc.TransitionTier(oi.ToLifecycleOpts())
	opts := ObjectOptions{
		Transition: TransitionOptions{
			Status: lifecycle.TransitionPending,
			Tier:   lc.TransitionTier(oi.ToLifecycleOpts()),
			Tier:   tier,
			ETag:   oi.ETag,
		},
		VersionID: oi.VersionID,
@@ -353,7 +419,7 @@ func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo)
		VersionSuspended: globalBucketVersioningSys.Suspended(oi.Bucket),
		MTime:            oi.ModTime,
	}
	return objectAPI.TransitionObject(ctx, oi.Bucket, oi.Name, opts)
	return tier, objectAPI.TransitionObject(ctx, oi.Bucket, oi.Name, opts)
}

// getTransitionedObjectReader returns a reader from the transitioned tier.
@@ -436,9 +502,7 @@ func (sp *SelectParameters) IsEmpty() bool {
	return sp == nil
}

var (
	selectParamsXMLName = "SelectParameters"
)
var selectParamsXMLName = "SelectParameters"

// UnmarshalXML - decodes XML data.
func (sp *SelectParameters) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
@@ -550,7 +614,7 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O

	if rreq.Type == SelectRestoreRequest {
		for _, v := range rreq.OutputLocation.S3.UserMetadata {
			if !strings.HasPrefix("x-amz-meta", strings.ToLower(v.Name)) {
			if !strings.HasPrefix(strings.ToLower(v.Name), "x-amz-meta") {
				meta["x-amz-meta-"+v.Name] = v.Value
				continue
			}
@@ -630,9 +694,9 @@ func completedRestoreObj(expiry time.Time) restoreObjStatus {
// String returns x-amz-restore compatible representation of r.
func (r restoreObjStatus) String() string {
	if r.Ongoing() {
		return "ongoing-request=true"
		return `ongoing-request="true"`
	}
	return fmt.Sprintf("ongoing-request=false, expiry-date=%s", r.expiry.Format(http.TimeFormat))
	return fmt.Sprintf(`ongoing-request="false", expiry-date="%s"`, r.expiry.Format(http.TimeFormat))
}

// Expiry returns expiry of restored object and true if restore-object has completed.
@@ -676,12 +740,11 @@ func parseRestoreObjStatus(restoreHdr string) (restoreObjStatus, error) {
	}

	switch progressTokens[1] {
	case "true":
	case "true", `"true"`: // true without double quotes is deprecated in Feb 2022
		if len(tokens) == 1 {
			return ongoingRestoreObj(), nil
		}

	case "false":
	case "false", `"false"`: // false without double quotes is deprecated in Feb 2022
		if len(tokens) != 2 {
			return restoreObjStatus{}, errRestoreHDRMalformed
		}
@@ -692,8 +755,7 @@ func parseRestoreObjStatus(restoreHdr string) (restoreObjStatus, error) {
		if strings.TrimSpace(expiryTokens[0]) != "expiry-date" {
			return restoreObjStatus{}, errRestoreHDRMalformed
		}

		expiry, err := time.Parse(http.TimeFormat, expiryTokens[1])
		expiry, err := time.Parse(http.TimeFormat, strings.Trim(expiryTokens[1], `"`))
		if err != nil {
			return restoreObjStatus{}, errRestoreHDRMalformed
		}

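Note: the quoting changes above align the x-amz-restore header with the format AWS S3 documents (ongoing-request="false", expiry-date="<HTTP-date>"), while the parser stays tolerant of the legacy unquoted form. A small round-trip sketch under those assumptions; formatRestore/parseExpiry are illustrative helpers, not MinIO functions.

package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"
)

// formatRestore renders the header with quoted values, the shape the
// diff above adopts.
func formatRestore(ongoing bool, expiry time.Time) string {
	if ongoing {
		return `ongoing-request="true"`
	}
	return fmt.Sprintf(`ongoing-request="false", expiry-date="%s"`, expiry.Format(http.TimeFormat))
}

// parseExpiry trims optional quotes so both the quoted form and the
// deprecated unquoted form parse to the same time value.
func parseExpiry(v string) (time.Time, error) {
	return time.Parse(http.TimeFormat, strings.Trim(v, `"`))
}

func main() {
	exp, _ := time.Parse(http.TimeFormat, "Fri, 21 Dec 2012 00:00:00 GMT")
	hdr := formatRestore(false, exp)
	fmt.Println(hdr)
	parts := strings.SplitN(hdr, "expiry-date=", 2)
	t, err := parseExpiry(parts[1])
	fmt.Println(t.Equal(exp), err) // true <nil>
}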
@@ -36,7 +36,7 @@ func TestParseRestoreObjStatus(t *testing.T) {
	}{
		{
			// valid: represents a restored object, 'pending' expiry.
			restoreHdr: "ongoing-request=false, expiry-date=Fri, 21 Dec 2012 00:00:00 GMT",
			restoreHdr: `ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"`,
			expectedStatus: restoreObjStatus{
				ongoing: false,
				expiry:  time.Date(2012, 12, 21, 0, 0, 0, 0, time.UTC),
@@ -45,7 +45,7 @@ func TestParseRestoreObjStatus(t *testing.T) {
		},
		{
			// valid: represents an ongoing restore object request.
			restoreHdr: "ongoing-request=true",
			restoreHdr: `ongoing-request="true"`,
			expectedStatus: restoreObjStatus{
				ongoing: true,
			},
@@ -53,13 +53,13 @@ func TestParseRestoreObjStatus(t *testing.T) {
		},
		{
			// invalid; ongoing restore object request can't have expiry set on it.
			restoreHdr: "ongoing-request=true, expiry-date=Fri, 21 Dec 2012 00:00:00 GMT",
			restoreHdr: `ongoing-request="true", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"`,
			expectedStatus: restoreObjStatus{},
			expectedErr:    errRestoreHDRMalformed,
		},
		{
			// invalid; completed restore object request must have expiry set on it.
			restoreHdr: "ongoing-request=false",
			restoreHdr: `ongoing-request="false"`,
			expectedStatus: restoreObjStatus{},
			expectedErr:    errRestoreHDRMalformed,
		},

@@ -32,8 +32,6 @@ import (

func concurrentDecryptETag(ctx context.Context, objects []ObjectInfo) {
	g := errgroup.WithNErrs(len(objects)).WithConcurrency(500)
	_, cancel := g.WithCancelOnError(ctx)
	defer cancel()
	for index := range objects {
		index := index
		g.Go(func() error {
@@ -45,7 +43,7 @@ func concurrentDecryptETag(ctx context.Context, objects []ObjectInfo) {
			return nil
		}, index)
	}
	g.WaitErr()
	g.Wait()
}

// Validate all the ListObjects query arguments, returns an APIErrorCode
@@ -61,8 +59,8 @@ func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int
	}

	if encodingType != "" {
		// Only url encoding type is supported
		if strings.ToLower(encodingType) != "url" {
		// AWS S3 spec only supports 'url' encoding type
		if !strings.EqualFold(encodingType, "url") {
			return ErrInvalidEncodingMethod
		}
	}

@@ -74,7 +74,7 @@ func (sys *BucketMetadataSys) Set(bucket string, meta BucketMetadata) {

// Update updates bucket metadata for the specified config file.
// The configData should not be modified after being sent here.
func (sys *BucketMetadataSys) Update(bucket string, configFile string, configData []byte) error {
func (sys *BucketMetadataSys) Update(ctx context.Context, bucket string, configFile string, configData []byte) error {
	objAPI := newObjectLayerFn()
	if objAPI == nil {
		return errServerNotInitialized
@@ -83,13 +83,13 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
	if globalIsGateway && globalGatewayName != NASBackendGateway {
		if configFile == bucketPolicyConfig {
			if configData == nil {
				return objAPI.DeleteBucketPolicy(GlobalContext, bucket)
				return objAPI.DeleteBucketPolicy(ctx, bucket)
			}
			config, err := policy.ParseConfig(bytes.NewReader(configData), bucket)
			if err != nil {
				return err
			}
			return objAPI.SetBucketPolicy(GlobalContext, bucket, config)
			return objAPI.SetBucketPolicy(ctx, bucket, config)
		}
		return NotImplemented{}
	}
@@ -98,7 +98,7 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
		return errInvalidArgument
	}

	meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
	meta, err := loadBucketMetadata(ctx, objAPI, bucket)
	if err != nil {
		if !globalIsErasure && !globalIsDistErasure && errors.Is(err, errVolumeNotFound) {
			// Only single drive mode needs this fallback.
@@ -148,12 +148,12 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
		return fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)
	}

	if err := meta.Save(GlobalContext, objAPI); err != nil {
	if err := meta.Save(ctx, objAPI); err != nil {
		return err
	}

	sys.Set(bucket, meta)
	globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)
	globalNotificationSys.LoadBucketMetadata(bgContext(ctx), bucket) // Do not use caller context here

	return nil
}
@@ -187,7 +187,7 @@ func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) {
// GetVersioningConfig returns configured versioning config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Versioning, error) {
	meta, err := sys.GetConfig(bucket)
	meta, err := sys.GetConfig(GlobalContext, bucket)
	if err != nil {
		return nil, err
	}
@@ -197,7 +197,7 @@ func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Ve
// GetTaggingConfig returns configured tagging config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, error) {
	meta, err := sys.GetConfig(bucket)
	meta, err := sys.GetConfig(GlobalContext, bucket)
	if err != nil {
		if errors.Is(err, errConfigNotFound) {
			return nil, BucketTaggingNotFound{Bucket: bucket}
@@ -213,7 +213,7 @@ func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, error
// GetObjectLockConfig returns configured object lock config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetObjectLockConfig(bucket string) (*objectlock.Config, error) {
	meta, err := sys.GetConfig(bucket)
	meta, err := sys.GetConfig(GlobalContext, bucket)
	if err != nil {
		if errors.Is(err, errConfigNotFound) {
			return nil, BucketObjectLockConfigNotFound{Bucket: bucket}
@@ -245,7 +245,7 @@ func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Life
		return meta.lifecycleConfig, nil
	}

	meta, err := sys.GetConfig(bucket)
	meta, err := sys.GetConfig(GlobalContext, bucket)
	if err != nil {
		if errors.Is(err, errConfigNotFound) {
			return nil, BucketLifecycleNotFound{Bucket: bucket}
@@ -274,7 +274,7 @@ func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Confi
		return meta.notificationConfig, nil
	}

	meta, err := sys.GetConfig(bucket)
	meta, err := sys.GetConfig(GlobalContext, bucket)
	if err != nil {
		return nil, err
	}
@@ -284,7 +284,7 @@ func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Confi
// GetSSEConfig returns configured SSE config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEConfig, error) {
	meta, err := sys.GetConfig(bucket)
	meta, err := sys.GetConfig(GlobalContext, bucket)
	if err != nil {
		if errors.Is(err, errConfigNotFound) {
			return nil, BucketSSEConfigNotFound{Bucket: bucket}
@@ -308,7 +308,7 @@ func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, er
		return objAPI.GetBucketPolicy(GlobalContext, bucket)
	}

	meta, err := sys.GetConfig(bucket)
	meta, err := sys.GetConfig(GlobalContext, bucket)
	if err != nil {
		if errors.Is(err, errConfigNotFound) {
			return nil, BucketPolicyNotFound{Bucket: bucket}
@@ -323,8 +323,8 @@ func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, er

// GetQuotaConfig returns configured bucket quota
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetQuotaConfig(bucket string) (*madmin.BucketQuota, error) {
	meta, err := sys.GetConfig(bucket)
func (sys *BucketMetadataSys) GetQuotaConfig(ctx context.Context, bucket string) (*madmin.BucketQuota, error) {
	meta, err := sys.GetConfig(ctx, bucket)
	if err != nil {
		return nil, err
	}
@@ -334,7 +334,7 @@ func (sys *BucketMetadataSys) GetQuotaConfig(bucket string) (*madmin.BucketQuota
// GetReplicationConfig returns configured bucket replication config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket string) (*replication.Config, error) {
	meta, err := sys.GetConfig(bucket)
	meta, err := sys.GetConfig(ctx, bucket)
	if err != nil {
		if errors.Is(err, errConfigNotFound) {
			return nil, BucketReplicationConfigNotFound{Bucket: bucket}
@@ -351,7 +351,7 @@ func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket s
// GetBucketTargetsConfig returns configured bucket targets for this bucket
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetBucketTargetsConfig(bucket string) (*madmin.BucketTargets, error) {
	meta, err := sys.GetConfig(bucket)
	meta, err := sys.GetConfig(GlobalContext, bucket)
	if err != nil {
		return nil, err
	}
@@ -377,7 +377,7 @@ func (sys *BucketMetadataSys) GetBucketTarget(bucket string, arn string) (madmin

// GetConfig returns a specific configuration from the bucket metadata.
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetConfig(bucket string) (BucketMetadata, error) {
func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (BucketMetadata, error) {
	objAPI := newObjectLayerFn()
	if objAPI == nil {
		return newBucketMetadata(bucket), errServerNotInitialized
@@ -397,7 +397,7 @@ func (sys *BucketMetadataSys) GetConfig(bucket string) (BucketMetadata, error) {
	if ok {
		return meta, nil
	}
	meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
	meta, err := loadBucketMetadata(ctx, objAPI, bucket)
	if err != nil {
		return meta, err
	}
@@ -434,6 +434,7 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
	_, _ = objAPI.HealBucket(ctx, buckets[index].Name, madmin.HealOpts{
		// Ensure heal opts for bucket metadata be deep healed all the time.
		ScanMode: madmin.HealDeepScan,
		Recreate: true,
	})
	meta, err := loadBucketMetadata(ctx, objAPI, buckets[index].Name)
	if err != nil {

@@ -120,7 +120,7 @@ func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string)
		logger.LogIf(ctx, errors.New("bucket name cannot be empty"))
		return errors.New("bucket name cannot be empty")
	}
	configFile := path.Join(bucketConfigPrefix, name, bucketMetadataFile)
	configFile := path.Join(bucketMetaPrefix, name, bucketMetadataFile)
	data, err := readConfig(ctx, api, configFile)
	if err != nil {
		return err
@@ -277,7 +277,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
	}

	for _, legacyFile := range legacyConfigs {
		configFile := path.Join(bucketConfigPrefix, b.Name, legacyFile)
		configFile := path.Join(bucketMetaPrefix, b.Name, legacyFile)

		configData, err := readConfig(ctx, objectAPI, configFile)
		if err != nil {
@@ -338,7 +338,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
	}

	for legacyFile := range configs {
		configFile := path.Join(bucketConfigPrefix, b.Name, legacyFile)
		configFile := path.Join(bucketMetaPrefix, b.Name, legacyFile)
		if err := deleteConfig(ctx, objectAPI, configFile); err != nil && !errors.Is(err, errConfigNotFound) {
			logger.LogIf(ctx, err)
		}
@@ -365,7 +365,7 @@ func (b *BucketMetadata) Save(ctx context.Context, api ObjectLayer) error {
		return err
	}

	configFile := path.Join(bucketConfigPrefix, b.Name, bucketMetadataFile)
	configFile := path.Join(bucketMetaPrefix, b.Name, bucketMetadataFile)
	return saveConfig(ctx, api, configFile, data)
}

@@ -375,9 +375,10 @@ func deleteBucketMetadata(ctx context.Context, obj objectDeleter, bucket string)
	metadataFiles := []string{
		dataUsageCacheName,
		bucketMetadataFile,
		path.Join(replicationDir, resyncFileName),
	}
	for _, metaFile := range metadataFiles {
		configFile := path.Join(bucketConfigPrefix, bucket, metaFile)
		configFile := path.Join(bucketMetaPrefix, bucket, metaFile)
		if err := deleteConfig(ctx, obj, configFile); err != nil && err != errConfigNotFound {
			return err
		}

@@ -30,7 +30,6 @@ import (
)

const (
	bucketConfigPrefix       = "buckets"
	bucketNotificationConfig = "notification.xml"
)

@@ -72,8 +71,8 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
	config.SetRegion(globalServerRegion)
	if err = config.Validate(globalServerRegion, globalNotificationSys.targetList); err != nil {
	config.SetRegion(globalSite.Region)
	if err = config.Validate(globalSite.Region, globalNotificationSys.targetList); err != nil {
		arnErr, ok := err.(*event.ErrARNNotFound)
		if ok {
			for i, queue := range config.QueueList {
@@ -145,7 +144,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
		return
	}

	config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalServerRegion, globalNotificationSys.targetList)
	config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalSite.Region, globalNotificationSys.targetList)
	if err != nil {
		apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
		if event.IsEventError(err) {
@@ -161,7 +160,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
		return
	}

	if err = globalBucketMetadataSys.Update(bucketName, bucketNotificationConfig, configData); err != nil {
	if err = globalBucketMetadataSys.Update(ctx, bucketName, bucketNotificationConfig, configData); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

@@ -58,6 +58,10 @@ func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention,
// enforceRetentionForDeletion checks if it is appropriate to remove an
// object according to locking configuration when lifecycle or bucket quota is asking.
func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locked bool) {
	if objInfo.DeleteMarker {
		return false
	}

	lhold := objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
	if lhold.Status.Valid() && lhold.Status == objectlock.LegalHoldOn {
		return true
@@ -175,68 +179,76 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke
// For objects in "Governance" mode, overwrite is allowed if a) object retention date is past OR
// governance bypass headers are set and user has governance bypass permissions.
// Objects in compliance mode can be overwritten only if retention date is being extended. No mode change is permitted.
func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, objRetention *objectlock.ObjectRetention, cred auth.Credentials, owner bool) (ObjectInfo, APIErrorCode) {
func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, oi ObjectInfo, objRetention *objectlock.ObjectRetention, cred auth.Credentials, owner bool) error {
	byPassSet := objectlock.IsObjectLockGovernanceBypassSet(r.Header)
	opts, err := getOpts(ctx, r, bucket, object)
	if err != nil {
		return ObjectInfo{}, toAPIErrorCode(ctx, err)
	}

	oi, err := getObjectInfoFn(ctx, bucket, object, opts)
	if err != nil {
		return oi, toAPIErrorCode(ctx, err)
	}

	t, err := objectlock.UTCNowNTP()
	if err != nil {
		logger.LogIf(ctx, err)
		return oi, ErrObjectLocked
		return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID}
	}

	// Pass in relative days from current time, to additionally verify "object-lock-remaining-retention-days" policy if any.
	// Pass in relative days from current time, to additionally
	// verify "object-lock-remaining-retention-days" policy if any.
	days := int(math.Ceil(math.Abs(objRetention.RetainUntilDate.Sub(t).Hours()) / 24))

	ret := objectlock.GetObjectRetentionMeta(oi.UserDefined)
	if ret.Mode.Valid() {
		// Retention has expired you may change whatever you like.
		if ret.RetainUntilDate.Before(t) {
			perm := isPutRetentionAllowed(bucket, object,
			apiErr := isPutRetentionAllowed(oi.Bucket, oi.Name,
				days, objRetention.RetainUntilDate.Time,
				objRetention.Mode, byPassSet, r, cred,
				owner)
			return oi, perm
			switch apiErr {
			case ErrAccessDenied:
				return errAuthentication
			}
			return nil
		}

		switch ret.Mode {
		case objectlock.RetGovernance:
			govPerm := isPutRetentionAllowed(bucket, object, days,
			govPerm := isPutRetentionAllowed(oi.Bucket, oi.Name, days,
				objRetention.RetainUntilDate.Time, objRetention.Mode,
				byPassSet, r, cred, owner)
			// Governance mode retention period cannot be shortened, if x-amz-bypass-governance is not set.
			if !byPassSet {
				if objRetention.Mode != objectlock.RetGovernance || objRetention.RetainUntilDate.Before((ret.RetainUntilDate.Time)) {
					return oi, ErrObjectLocked
					return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID}
				}
			}
			return oi, govPerm
			switch govPerm {
			case ErrAccessDenied:
				return errAuthentication
			}
			return nil
		case objectlock.RetCompliance:
			// Compliance retention mode cannot be changed or shortened.
			// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes
			if objRetention.Mode != objectlock.RetCompliance || objRetention.RetainUntilDate.Before((ret.RetainUntilDate.Time)) {
				return oi, ErrObjectLocked
				return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID}
			}
			compliancePerm := isPutRetentionAllowed(bucket, object,
			apiErr := isPutRetentionAllowed(oi.Bucket, oi.Name,
				days, objRetention.RetainUntilDate.Time, objRetention.Mode,
				false, r, cred, owner)
			return oi, compliancePerm
			switch apiErr {
			case ErrAccessDenied:
				return errAuthentication
			}
			return nil
		}
		return oi, ErrNone
		return nil
	} // No pre-existing retention metadata present.

	perm := isPutRetentionAllowed(bucket, object,
	apiErr := isPutRetentionAllowed(oi.Bucket, oi.Name,
		days, objRetention.RetainUntilDate.Time,
		objRetention.Mode, byPassSet, r, cred, owner)
	return oi, perm
	switch apiErr {
	case ErrAccessDenied:
		return errAuthentication
	}
	return nil
}

// checkPutObjectLockAllowed enforces object retention policy and legal hold policy

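Note: enforceRetentionBypassForPut now returns plain errors instead of an (ObjectInfo, APIErrorCode) pair. The WORM rules it encodes reduce to: COMPLIANCE retention may only be extended, never shortened or re-moded; GOVERNANCE may be shortened or re-moded only when the bypass header and permission are present. A condensed sketch of just that decision table follows; canChangeRetention is a hypothetical helper, not MinIO API.

package main

import (
	"errors"
	"fmt"
	"time"
)

type mode string

const (
	governance mode = "GOVERNANCE"
	compliance mode = "COMPLIANCE"
)

var errLocked = errors.New("object is WORM protected and cannot be overwritten")

// canChangeRetention applies the rules enforced above: compliance may
// only be extended in the same mode; governance may be shortened or
// switched only with the bypass header (and the matching permission,
// checked elsewhere).
func canChangeRetention(cur, req mode, curUntil, reqUntil time.Time, bypass bool) error {
	switch cur {
	case compliance:
		if req != compliance || reqUntil.Before(curUntil) {
			return errLocked
		}
	case governance:
		if !bypass && (req != governance || reqUntil.Before(curUntil)) {
			return errLocked
		}
	}
	return nil
}

func main() {
	now := time.Now()
	// shortening compliance retention is rejected
	fmt.Println(canChangeRetention(compliance, compliance, now.Add(24*time.Hour), now, false))
	// extending governance retention needs no bypass
	fmt.Println(canChangeRetention(governance, governance, now, now.Add(24*time.Hour), false))
}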
@@ -103,7 +103,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
		return
	}

	if err = globalBucketMetadataSys.Update(bucket, bucketPolicyConfig, configData); err != nil {
	if err = globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, configData); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
@@ -148,7 +148,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
		return
	}

	if err := globalBucketMetadataSys.Update(bucket, bucketPolicyConfig, nil); err != nil {
	if err := globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, nil); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

@@ -37,60 +37,72 @@ import (
func getAnonReadOnlyBucketPolicy(bucketName string) *policy.Policy {
	return &policy.Policy{
		Version: policy.DefaultVersion,
		Statements: []policy.Statement{policy.NewStatement(
			policy.Allow,
			policy.NewPrincipal("*"),
			policy.NewActionSet(policy.GetBucketLocationAction, policy.ListBucketAction),
			policy.NewResourceSet(policy.NewResource(bucketName, "")),
			condition.NewFunctions(),
		)},
		Statements: []policy.Statement{
			policy.NewStatement(
				"",
				policy.Allow,
				policy.NewPrincipal("*"),
				policy.NewActionSet(policy.GetBucketLocationAction, policy.ListBucketAction),
				policy.NewResourceSet(policy.NewResource(bucketName, "")),
				condition.NewFunctions(),
			),
		},
	}
}

func getAnonWriteOnlyBucketPolicy(bucketName string) *policy.Policy {
	return &policy.Policy{
		Version: policy.DefaultVersion,
		Statements: []policy.Statement{policy.NewStatement(
			policy.Allow,
			policy.NewPrincipal("*"),
			policy.NewActionSet(
				policy.GetBucketLocationAction,
				policy.ListBucketMultipartUploadsAction,
		Statements: []policy.Statement{
			policy.NewStatement(
				"",
				policy.Allow,
				policy.NewPrincipal("*"),
				policy.NewActionSet(
					policy.GetBucketLocationAction,
					policy.ListBucketMultipartUploadsAction,
				),
				policy.NewResourceSet(policy.NewResource(bucketName, "")),
				condition.NewFunctions(),
			),
			policy.NewResourceSet(policy.NewResource(bucketName, "")),
			condition.NewFunctions(),
		)},
		},
	}
}

func getAnonReadOnlyObjectPolicy(bucketName, prefix string) *policy.Policy {
	return &policy.Policy{
		Version: policy.DefaultVersion,
		Statements: []policy.Statement{policy.NewStatement(
			policy.Allow,
			policy.NewPrincipal("*"),
			policy.NewActionSet(policy.GetObjectAction),
			policy.NewResourceSet(policy.NewResource(bucketName, prefix)),
			condition.NewFunctions(),
		)},
		Statements: []policy.Statement{
			policy.NewStatement(
				"",
				policy.Allow,
				policy.NewPrincipal("*"),
				policy.NewActionSet(policy.GetObjectAction),
				policy.NewResourceSet(policy.NewResource(bucketName, prefix)),
				condition.NewFunctions(),
			),
		},
	}
}

func getAnonWriteOnlyObjectPolicy(bucketName, prefix string) *policy.Policy {
	return &policy.Policy{
		Version: policy.DefaultVersion,
		Statements: []policy.Statement{policy.NewStatement(
			policy.Allow,
			policy.NewPrincipal("*"),
			policy.NewActionSet(
				policy.AbortMultipartUploadAction,
				policy.DeleteObjectAction,
				policy.ListMultipartUploadPartsAction,
				policy.PutObjectAction,
		Statements: []policy.Statement{
			policy.NewStatement(
				"",
				policy.Allow,
				policy.NewPrincipal("*"),
				policy.NewActionSet(
					policy.AbortMultipartUploadAction,
					policy.DeleteObjectAction,
					policy.ListMultipartUploadPartsAction,
					policy.PutObjectAction,
				),
				policy.NewResourceSet(policy.NewResource(bucketName, prefix)),
				condition.NewFunctions(),
			),
			policy.NewResourceSet(policy.NewResource(bucketName, prefix)),
			condition.NewFunctions(),
		)},
		},
	}
}

@@ -105,7 +117,7 @@ func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRoute
	bucketName1 := fmt.Sprintf("%s-1", bucketName)

	const n = 100
	var start = make(chan struct{})
	start := make(chan struct{})
	var ok, errs int
	var wg sync.WaitGroup
	var mu sync.Mutex
@@ -147,8 +159,8 @@ func TestPutBucketPolicyHandler(t *testing.T) {

// testPutBucketPolicyHandler - Test for Bucket policy end point.
func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
	credentials auth.Credentials, t *testing.T) {

	credentials auth.Credentials, t *testing.T,
) {
	bucketName1 := fmt.Sprintf("%s-1", bucketName)
	if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, BucketOptions{}); err != nil {
		t.Fatal(err)
@@ -333,7 +345,6 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
	// create unsigned HTTP request for PutBucketPolicyHandler.
	anonReq, err := newTestRequest(http.MethodPut, getPutPolicyURL("", bucketName),
		int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)))

	if err != nil {
		t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
			instanceType, bucketName, err)
@@ -352,14 +363,12 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string

	nilReq, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", nilBucket),
		0, nil, "", "", nil)

	if err != nil {
		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
	}
	// execute the object layer set to `nil` test.
	// `ExecObjectLayerAPINilTest` manages the operation.
	ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)

}

// Wrapper for calling Get Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
@@ -465,7 +474,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
	// construct HTTP request for PUT bucket policy endpoint.
	reqV4, err := newTestSignedRequestV4(http.MethodGet, getGetPolicyURL("", testCase.bucketName),
		0, nil, testCase.accessKey, testCase.secretKey, nil)

	if err != nil {
		t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
	}
@@ -540,7 +548,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
	// Bucket policy related functions don't support anonymous requests, setting policies shouldn't make a difference.
	// create unsigned HTTP request for PutBucketPolicyHandler.
	anonReq, err := newTestRequest(http.MethodGet, getPutPolicyURL("", bucketName), 0, nil)

	if err != nil {
		t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
			instanceType, bucketName, err)
@@ -559,7 +566,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string

	nilReq, err := newTestSignedRequestV4(http.MethodGet, getGetPolicyURL("", nilBucket),
		0, nil, "", "", nil)

	if err != nil {
		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
	}
@@ -575,8 +581,8 @@ func TestDeleteBucketPolicyHandler(t *testing.T) {

// testDeleteBucketPolicyHandler - Test for Delete bucket policy end point.
func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
	credentials auth.Credentials, t *testing.T) {

	credentials auth.Credentials, t *testing.T,
) {
	// template for constructing HTTP request body for PUT bucket policy.
	bucketPolicyTemplate := `{
	"Version": "2012-10-17",
@@ -743,7 +749,6 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
	// Bucket policy related functions don't support anonymous requests, setting policies shouldn't make a difference.
	// create unsigned HTTP request for PutBucketPolicyHandler.
	anonReq, err := newTestRequest(http.MethodDelete, getPutPolicyURL("", bucketName), 0, nil)

	if err != nil {
		t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
			instanceType, bucketName, err)
@@ -762,7 +767,6 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str

	nilReq, err := newTestSignedRequestV4(http.MethodDelete, getDeletePolicyURL("", nilBucket),
		0, nil, "", "", nil)

	if err != nil {
		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
	}

@@ -172,11 +172,12 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
		vStr, ok := v.(string)
		if ok {
			// Special case for AD/LDAP STS users
			if k == ldapUser {
			switch k {
			case ldapUser:
				args["user"] = []string{vStr}
			} else if k == ldapUserN {
			case ldapUserN:
				args["username"] = []string{vStr}
			} else {
			default:
				args[k] = []string{vStr}
			}
		}
@@ -199,7 +200,7 @@ func PolicyToBucketAccessPolicy(bucketPolicy *policy.Policy) (*miniogopolicy.Buc
	}

	var policyInfo miniogopolicy.BucketAccessPolicy
	var json = jsoniter.ConfigCompatibleWithStandardLibrary
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	if err = json.Unmarshal(data, &policyInfo); err != nil {
		// This should not happen because data is valid JSON.
		return nil, err
@@ -217,7 +218,7 @@ func BucketAccessPolicyToPolicy(policyInfo *miniogopolicy.BucketAccessPolicy) (*
	}

	var bucketPolicy policy.Policy
	var json = jsoniter.ConfigCompatibleWithStandardLibrary
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	if err = json.Unmarshal(data, &bucketPolicy); err != nil {
		// This should not happen because data is valid JSON.
		return nil, err

@@ -20,11 +20,11 @@ package cmd
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"github.com/minio/madmin-go"
	"github.com/minio/minio/internal/event"
	"github.com/minio/minio/internal/logger"
)

@@ -34,7 +34,7 @@ type BucketQuotaSys struct {
}

// Get - Get quota configuration.
func (sys *BucketQuotaSys) Get(bucketName string) (*madmin.BucketQuota, error) {
func (sys *BucketQuotaSys) Get(ctx context.Context, bucketName string) (*madmin.BucketQuota, error) {
	if globalIsGateway {
		objAPI := newObjectLayerFn()
		if objAPI == nil {
@@ -43,7 +43,7 @@ func (sys *BucketQuotaSys) Get(bucketName string) (*madmin.BucketQuota, error) {
		return &madmin.BucketQuota{}, nil
	}

	return globalBucketMetadataSys.GetQuotaConfig(bucketName)
	return globalBucketMetadataSys.GetQuotaConfig(ctx, bucketName)
}

// NewBucketQuotaSys returns initialized BucketQuotaSys
@@ -51,6 +51,35 @@ func NewBucketQuotaSys() *BucketQuotaSys {
	return &BucketQuotaSys{}
}

// Init initializes bucket quota.
func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
	sys.bucketStorageCache.Once.Do(func() {
		sys.bucketStorageCache.TTL = 1 * time.Second
		sys.bucketStorageCache.Update = func() (interface{}, error) {
			ctx, done := context.WithTimeout(context.Background(), 5*time.Second)
			defer done()

			return loadDataUsageFromBackend(ctx, objAPI)
		}
	})
}

// GetBucketUsageInfo returns bucket usage info for a given bucket
func (sys *BucketQuotaSys) GetBucketUsageInfo(bucket string) (BucketUsageInfo, error) {
	v, err := sys.bucketStorageCache.Get()
	if err != nil {
		return BucketUsageInfo{}, err
	}

	dui, ok := v.(DataUsageInfo)
	if !ok {
		return BucketUsageInfo{}, fmt.Errorf("internal error: Unexpected DUI data type: %T", v)
	}

	bui := dui.BucketsUsage[bucket]
	return bui, nil
}

// parseBucketQuota parses BucketQuota from json
func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota, err error) {
	quotaCfg = &madmin.BucketQuota{}
@@ -58,47 +87,32 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota,
	return quotaCfg, err
	}
	if !quotaCfg.IsValid() {
		if quotaCfg.Type == "fifo" {
			logger.LogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc admin bucket quota alias/bucket --clear' and use 'mc ilm add' for expiration of objects"))
			return quotaCfg, nil
		}
		return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg)
	}
	return
}

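Note: Init wires the shared bucketStorageCache exactly once — a TTL-guarded loader over loadDataUsageFromBackend — so quota checks reuse a roughly one-second-fresh data-usage snapshot instead of hitting the backend per request. A minimal sketch of such a once-initialized TTL cache follows; ttlCache is a hypothetical stand-in, not the actual cache type.

package main

import (
	"fmt"
	"sync"
	"time"
)

// ttlCache: one loader, initialized once, result reused until the TTL lapses.
type ttlCache struct {
	once    sync.Once
	mu      sync.Mutex
	ttl     time.Duration
	update  func() (interface{}, error)
	val     interface{}
	fetched time.Time
}

// init may be called from many paths; sync.Once makes it idempotent.
func (c *ttlCache) init(ttl time.Duration, update func() (interface{}, error)) {
	c.once.Do(func() {
		c.ttl = ttl
		c.update = update
	})
}

func (c *ttlCache) get() (interface{}, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.val != nil && time.Since(c.fetched) < c.ttl {
		return c.val, nil // fresh enough: serve the cached snapshot
	}
	v, err := c.update()
	if err != nil {
		return nil, err
	}
	c.val, c.fetched = v, time.Now()
	return v, nil
}

func main() {
	calls := 0
	c := &ttlCache{}
	c.init(time.Second, func() (interface{}, error) { calls++; return calls, nil })
	v1, _ := c.get()
	v2, _ := c.get() // served from cache, loader not re-run
	fmt.Println(v1, v2, calls) // 1 1 1
}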
func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64) error {
	objAPI := newObjectLayerFn()
	if objAPI == nil {
		return errServerNotInitialized
func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string, size int64) error {
	if size < 0 {
		return nil
	}

	sys.bucketStorageCache.Once.Do(func() {
		sys.bucketStorageCache.TTL = 1 * time.Second
		sys.bucketStorageCache.Update = func() (interface{}, error) {
			ctx, done := context.WithTimeout(context.Background(), 5*time.Second)
			defer done()
			return loadDataUsageFromBackend(ctx, objAPI)
		}
	})

	q, err := sys.Get(bucket)
	q, err := sys.Get(ctx, bucket)
	if err != nil {
		return err
	}

	if q != nil && q.Type == madmin.HardQuota && q.Quota > 0 {
		v, err := sys.bucketStorageCache.Get()
		bui, err := sys.GetBucketUsageInfo(bucket)
		if err != nil {
			return err
		}

		dui := v.(madmin.DataUsageInfo)

		bui, ok := dui.BucketsUsage[bucket]
		if !ok {
			// bucket not found, cannot enforce quota
			// call will fail anyways later.
			return nil
		}

		if (bui.Size + uint64(size)) >= q.Quota {
		if bui.Size > 0 && ((bui.Size + uint64(size)) >= q.Quota) {
			return BucketQuotaExceeded{Bucket: bucket}
		}
	}
@@ -106,120 +120,9 @@ func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64)
	return nil
}

func enforceBucketQuota(ctx context.Context, bucket string, size int64) error {
	if size < 0 {
func enforceBucketQuotaHard(ctx context.Context, bucket string, size int64) error {
	if globalBucketQuotaSys == nil {
		return nil
	}

	return globalBucketQuotaSys.check(ctx, bucket, size)
}

// enforceFIFOQuota deletes objects in FIFO order until sufficient objects
// have been deleted so as to bring bucket usage within quota.
func enforceFIFOQuotaBucket(ctx context.Context, objectAPI ObjectLayer, bucket string, bui BucketUsageInfo) {
	// Check if the current bucket has quota restrictions, if not skip it
	cfg, err := globalBucketQuotaSys.Get(bucket)
	if err != nil {
		return
	}

	if cfg.Type != madmin.FIFOQuota {
		return
	}

	var toFree uint64
	if bui.Size > cfg.Quota && cfg.Quota > 0 {
		toFree = bui.Size - cfg.Quota
	}

	if toFree <= 0 {
		return
	}

	// Allocate new results channel to receive ObjectInfo.
	objInfoCh := make(chan ObjectInfo)

	versioned := globalBucketVersioningSys.Enabled(bucket)

	// Walk through all objects
	if err := objectAPI.Walk(ctx, bucket, "", objInfoCh, ObjectOptions{WalkVersions: versioned}); err != nil {
		logger.LogIf(ctx, err)
		return
	}

	// reuse the fileScorer used by disk cache to score entries by
	// ModTime to find the oldest objects in bucket to delete. In
	// the context of bucket quota enforcement - number of hits are
	// irrelevant.
	scorer, err := newFileScorer(toFree, time.Now().Unix(), 1)
	if err != nil {
		logger.LogIf(ctx, err)
		return
	}

	rcfg, _ := globalBucketObjectLockSys.Get(bucket)
	for obj := range objInfoCh {
		if obj.DeleteMarker {
			// Delete markers are automatically added for FIFO purge.
			scorer.addFileWithObjInfo(obj, 1)
			continue
		}
		// skip objects currently under retention
		if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
			continue
		}
		scorer.addFileWithObjInfo(obj, 1)
	}

	// If we saw less than quota we are good.
	if scorer.seenBytes <= cfg.Quota {
		return
	}
	// Calculate how much we want to delete now.
	toFreeNow := scorer.seenBytes - cfg.Quota
	// We were less over quota than we thought. Adjust so we delete less.
	// If we are more over, leave it for the next run to pick up.
	if toFreeNow < toFree {
		if !scorer.adjustSaveBytes(int64(toFreeNow) - int64(toFree)) {
			// We got below or at quota.
			return
		}
	}

	var objects []ObjectToDelete
	numKeys := len(scorer.fileObjInfos())
	for i, obj := range scorer.fileObjInfos() {
		objects = append(objects, ObjectToDelete{
			ObjectName: obj.Name,
			VersionID:  obj.VersionID,
		})
		if len(objects) < maxDeleteList && (i < numKeys-1) {
			// skip deletion until maxDeleteList or end of slice
			continue
		}

		if len(objects) == 0 {
			break
		}

		// Deletes a list of objects.
		_, deleteErrs := objectAPI.DeleteObjects(ctx, bucket, objects, ObjectOptions{
			Versioned: versioned,
		})
		for i := range deleteErrs {
			if deleteErrs[i] != nil {
				logger.LogIf(ctx, deleteErrs[i])
				continue
			}

			// Notify object deleted event.
			sendEvent(eventArgs{
				EventName:  event.ObjectRemovedDelete,
				BucketName: bucket,
				Object:     obj,
				Host:       "Internal: [FIFO-QUOTA-EXPIRY]",
			})
		}
		objects = nil
	}
	return globalBucketQuotaSys.enforceQuotaHard(ctx, bucket, size)
}

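Note: with FIFO quotas removed, only hard quotas remain, and the new bui.Size > 0 guard skips enforcement while the scanner has not yet produced usage data for the bucket. The arithmetic reduces to the sketch below; checkHardQuota is an illustrative helper, not MinIO API.

package main

import (
	"errors"
	"fmt"
)

var errQuotaExceeded = errors.New("bucket quota exceeded")

// checkHardQuota mirrors the enforcement above: negative sizes are
// ignored, and a write is rejected only when the bucket already holds
// scanned data and the new total would reach the configured hard limit.
func checkHardQuota(used uint64, incoming int64, quota uint64) error {
	if incoming < 0 || quota == 0 {
		return nil // nothing to enforce
	}
	if used > 0 && used+uint64(incoming) >= quota {
		return errQuotaExceeded
	}
	return nil
}

func main() {
	fmt.Println(checkHardQuota(900, 50, 1000)) // nil, still under quota
	fmt.Println(checkHardQuota(990, 50, 1000)) // bucket quota exceeded
	fmt.Println(checkHardQuota(0, 5000, 1000)) // nil, usage not yet scanned
}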
@@ -20,7 +20,7 @@ package cmd
import (
	"context"
	"sync"
	"sync/atomic"
	"time"

	"github.com/minio/minio/internal/bucket/replication"
)
@@ -51,8 +51,10 @@ func (r *ReplicationStats) Delete(bucket string) {
	r.Lock()
	defer r.Unlock()
	delete(r.Cache, bucket)
	delete(r.UsageCache, bucket)

	r.ulock.Lock()
	defer r.ulock.Unlock()
	delete(r.UsageCache, bucket)
}

// UpdateReplicaStat updates in-memory replica statistics with new values.
@@ -67,54 +69,56 @@ func (r *ReplicationStats) UpdateReplicaStat(bucket string, n int64) {
	if !ok {
		bs = &BucketReplicationStats{Stats: make(map[string]*BucketReplicationStat)}
	}
	atomic.StoreInt64(&bs.ReplicaSize, n)
	bs.ReplicaSize += n
	r.Cache[bucket] = bs
}

// Update updates in-memory replication statistics with new values.
func (r *ReplicationStats) Update(bucket string, arn string, n int64, status, prevStatus replication.StatusType, opType replication.Type) {
func (r *ReplicationStats) Update(bucket string, arn string, n int64, duration time.Duration, status, prevStatus replication.StatusType, opType replication.Type) {
	if r == nil {
		return
	}
	r.RLock()
	r.Lock()
	defer r.Unlock()

	bs, ok := r.Cache[bucket]
	if !ok {
		bs = &BucketReplicationStats{Stats: make(map[string]*BucketReplicationStat)}
		r.Cache[bucket] = bs
	}
	b, ok := bs.Stats[arn]
	if !ok {
		b = &BucketReplicationStat{}
		bs.Stats[arn] = b
	}
	r.RUnlock()
	switch status {
	case replication.Completed:
		switch prevStatus { // adjust counters based on previous state
		case replication.Failed:
			atomic.AddInt64(&b.FailedCount, -1)
			b.FailedCount--
		}
		if opType == replication.ObjectReplicationType {
			atomic.AddInt64(&b.ReplicatedSize, n)
			b.ReplicatedSize += n
			switch prevStatus {
			case replication.Failed:
				atomic.AddInt64(&b.FailedSize, -1*n)
				b.FailedSize -= n
			}
			if duration > 0 {
				b.Latency.update(n, duration)
			}
		}
	case replication.Failed:
		if opType == replication.ObjectReplicationType {
			if prevStatus == replication.Pending {
				atomic.AddInt64(&b.FailedSize, n)
				atomic.AddInt64(&b.FailedCount, 1)
				b.FailedSize += n
				b.FailedCount++
			}
		}
	case replication.Replica:
		if opType == replication.ObjectReplicationType {
			atomic.AddInt64(&b.ReplicaSize, n)
			b.ReplicaSize += n
		}
	}
	r.Lock()
	bs.Stats[arn] = b
	r.Cache[bucket] = bs
	r.Unlock()
}

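Note: Update previously mixed atomics with a read lock and then re-locked to write the map, which left a window between the counter updates and the map writes; the rewrite holds the write lock for the whole read-modify-write and adjusts counters by status transition. A self-contained sketch of that transition accounting; statsMap and its fields are hypothetical, not MinIO types.

package main

import (
	"fmt"
	"sync"
)

type stat struct {
	ReplicatedSize int64
	FailedSize     int64
	FailedCount    int64
}

type statsMap struct {
	mu sync.Mutex
	m  map[string]*stat
}

// update adjusts counters based on the transition between the previous
// and the new replication status, holding one lock for the whole
// read-modify-write instead of mixing atomics with map access.
func (s *statsMap) update(arn string, n int64, prevFailed, nowFailed bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	st, ok := s.m[arn]
	if !ok {
		st = &stat{}
		s.m[arn] = st
	}
	switch {
	case nowFailed && !prevFailed: // fresh failure
		st.FailedSize += n
		st.FailedCount++
	case !nowFailed && prevFailed: // retry succeeded: back out the failure
		st.FailedSize -= n
		st.FailedCount--
		st.ReplicatedSize += n
	case !nowFailed: // plain success; repeated failures change nothing
		st.ReplicatedSize += n
	}
}

func main() {
	s := &statsMap{m: make(map[string]*stat)}
	s.update("arn:minio:replication::target", 1024, false, true) // first attempt failed
	s.update("arn:minio:replication::target", 1024, true, false) // retry completed
	fmt.Println(*s.m["arn:minio:replication::target"])           // {1024 0 0}
}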
// GetInitialUsage gets replication metrics available at the time of cluster initialization
@@ -122,44 +126,13 @@ func (r *ReplicationStats) GetInitialUsage(bucket string) BucketReplicationStats
	if r == nil {
		return BucketReplicationStats{}
	}

	r.ulock.RLock()

	brs := BucketReplicationStats{Stats: make(map[string]*BucketReplicationStat)}

	defer r.ulock.RUnlock()
	st, ok := r.UsageCache[bucket]
	if ok {
		return st.Clone()
	if !ok {
		return BucketReplicationStats{}
	}
	r.ulock.RUnlock()

	dataUsageInfo, err := loadDataUsageFromBackend(GlobalContext, newObjectLayerFn())
	if err != nil {
		return brs
	}
	// data usage has not captured any data yet.
	if dataUsageInfo.LastUpdate.IsZero() {
		return brs
	}
	usage, ok := dataUsageInfo.BucketsUsage[bucket]
	if ok && usage.ReplicationInfo != nil {
		brs.ReplicaSize = int64(usage.ReplicaSize)
		for arn, uinfo := range usage.ReplicationInfo {
			brs.Stats[arn] = &BucketReplicationStat{
				FailedSize:     int64(uinfo.ReplicationFailedSize),
				ReplicatedSize: int64(uinfo.ReplicatedSize),
				ReplicaSize:    int64(uinfo.ReplicaSize),
				FailedCount:    int64(uinfo.ReplicationFailedCount),
			}
		}
		if brs.hasReplicationUsage() {
			r.ulock.Lock()
			defer r.ulock.Unlock()
			r.UsageCache[bucket] = &brs
		}

	}
	return brs
	return st.Clone()
}

// Get replication metrics for a bucket from this node since this node came up.
@@ -173,29 +146,46 @@ func (r *ReplicationStats) Get(bucket string) BucketReplicationStats {

	st, ok := r.Cache[bucket]
	if !ok {
		return BucketReplicationStats{Stats: make(map[string]*BucketReplicationStat)}
		return BucketReplicationStats{}
	}
	return st.Clone()
}

// NewReplicationStats initialize in-memory replication statistics
|
||||
func NewReplicationStats(ctx context.Context, objectAPI ObjectLayer) *ReplicationStats {
|
||||
st := &ReplicationStats{
|
||||
return &ReplicationStats{
|
||||
Cache: make(map[string]*BucketReplicationStats),
|
||||
UsageCache: make(map[string]*BucketReplicationStats),
|
||||
}
|
||||
}
|
||||
|
||||
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
|
||||
if err != nil {
|
||||
return st
|
||||
// load replication metrics at cluster start from initial data usage
|
||||
func (r *ReplicationStats) loadInitialReplicationMetrics(ctx context.Context) {
|
||||
rTimer := time.NewTimer(time.Minute * 1)
|
||||
defer rTimer.Stop()
|
||||
var (
|
||||
dui DataUsageInfo
|
||||
err error
|
||||
)
|
||||
outer:
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-rTimer.C:
|
||||
dui, err = loadDataUsageFromBackend(GlobalContext, newObjectLayerFn())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// If LastUpdate is set, data usage is available.
|
||||
if !dui.LastUpdate.IsZero() {
|
||||
break outer
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// data usage has not captured any data yet.
|
||||
if dataUsageInfo.LastUpdate.IsZero() {
|
||||
return st
|
||||
}
|
||||
|
||||
for bucket, usage := range dataUsageInfo.BucketsUsage {
|
||||
m := make(map[string]*BucketReplicationStats)
|
||||
for bucket, usage := range dui.BucketsUsage {
|
||||
b := &BucketReplicationStats{
|
||||
Stats: make(map[string]*BucketReplicationStat, len(usage.ReplicationInfo)),
|
||||
}
|
||||
@@ -209,9 +199,10 @@ func NewReplicationStats(ctx context.Context, objectAPI ObjectLayer) *Replicatio
|
||||
}
|
||||
b.ReplicaSize += int64(usage.ReplicaSize)
|
||||
if b.hasReplicationUsage() {
|
||||
st.UsageCache[bucket] = b
|
||||
m[bucket] = b
|
||||
}
|
||||
}
|
||||
|
||||
return st
|
||||
r.ulock.Lock()
|
||||
defer r.ulock.Unlock()
|
||||
r.UsageCache = m
|
||||
}
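loadInitialReplicationMetrics above defers the usage-cache warm-up until the scanner has produced a data usage snapshot, polling on a timer and bailing out on context cancellation. A generic, self-contained sketch of that poll-until-ready pattern follows; pollUntilReady is an illustrative helper, not a MinIO API, and note that a time.Timer must be re-armed with Reset to fire again:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// pollUntilReady calls load on each tick until it returns ready data
// or the context is cancelled.
func pollUntilReady(ctx context.Context, interval time.Duration, load func() (string, error)) (string, error) {
	t := time.NewTimer(interval)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return "", ctx.Err()
		case <-t.C:
			if v, err := load(); err == nil && v != "" {
				return v, nil
			}
			t.Reset(interval) // re-arm; a fired Timer does not tick again on its own
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	attempts := 0
	v, err := pollUntilReady(ctx, 100*time.Millisecond, func() (string, error) {
		attempts++
		if attempts < 3 {
			return "", errors.New("not ready")
		}
		return "snapshot", nil
	})
	fmt.Println(v, err) // snapshot <nil>
}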
|
||||
|
||||
@@ -20,12 +20,15 @@ package cmd
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/internal/bucket/replication"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
)
|
||||
|
||||
//go:generate msgp -file=$GOFILE
|
||||
@@ -34,6 +37,7 @@ import (
|
||||
type replicatedTargetInfo struct {
|
||||
Arn string
|
||||
Size int64
|
||||
Duration time.Duration
|
||||
ReplicationAction replicationAction // full or metadata only
|
||||
OpType replication.Type // whether incoming replication, existing object, healing etc..
|
||||
ReplicationStatus replication.StatusType
|
||||
@@ -172,7 +176,7 @@ func (o *ObjectInfo) TargetReplicationStatus(arn string) (status replication.Sta
|
||||
type replicateTargetDecision struct {
|
||||
Replicate bool // Replicate to this target
|
||||
Synchronous bool // Synchronous replication configured.
|
||||
Arn string //ARN of replication target
|
||||
Arn string // ARN of replication target
|
||||
ID string
|
||||
}
|
||||
|
||||
@@ -501,8 +505,10 @@ func getHealReplicateObjectInfo(objInfo ObjectInfo, rcfg replicationConfig) Repl
|
||||
var tgtStatuses map[string]replication.StatusType
|
||||
if oi.DeleteMarker || !oi.VersionPurgeStatus.Empty() {
|
||||
dsc = checkReplicateDelete(GlobalContext, oi.Bucket, ObjectToDelete{
|
||||
ObjectName: oi.Name,
|
||||
VersionID: oi.VersionID,
|
||||
ObjectV: ObjectV{
|
||||
ObjectName: oi.Name,
|
||||
VersionID: oi.VersionID,
|
||||
},
|
||||
}, oi, ObjectOptions{}, nil)
|
||||
} else {
|
||||
dsc = mustReplicate(GlobalContext, oi.Bucket, oi.Name, getMustReplicateOptions(ObjectInfo{
|
||||
@@ -573,8 +579,23 @@ type ResyncTargetsInfo struct {
|
||||
|
||||
// ResyncTarget is a struct representing the Target reset ID where target is identified by its Arn
|
||||
type ResyncTarget struct {
|
||||
Arn string `json:"arn"`
|
||||
ResetID string `json:"resetid"`
|
||||
Arn string `json:"arn"`
|
||||
ResetID string `json:"resetid"`
|
||||
StartTime time.Time `json:"startTime"`
|
||||
EndTime time.Time `json:"endTime"`
|
||||
// Status of resync operation
|
||||
ResyncStatus string `json:"resyncStatus,omitempty"`
|
||||
// Completed size in bytes
|
||||
ReplicatedSize int64 `json:"completedReplicationSize"`
|
||||
// Failed size in bytes
|
||||
FailedSize int64 `json:"failedReplicationSize"`
|
||||
// Total number of failed operations
|
||||
FailedCount int64 `json:"failedReplicationCount"`
|
||||
// Total number of objects replicated
|
||||
ReplicatedCount int64 `json:"replicationCount"`
|
||||
// Last bucket/object replicated.
|
||||
Bucket string `json:"bucket,omitempty"`
|
||||
Object string `json:"object,omitempty"`
|
||||
}
|
||||
|
||||
// VersionPurgeStatusType represents status of a versioned delete or permanent delete w.r.t bucket replication
|
||||
@@ -600,3 +621,106 @@ func (v VersionPurgeStatusType) Empty() bool {
|
||||
func (v VersionPurgeStatusType) Pending() bool {
|
||||
return v == Pending || v == Failed
|
||||
}
|
||||
|
||||
type replicationResyncState struct {
|
||||
// map of bucket to their resync status
|
||||
statusMap map[string]BucketReplicationResyncStatus
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
const (
|
||||
replicationDir = "replication"
|
||||
resyncFileName = "resync.bin"
|
||||
resyncMetaFormat = 1
|
||||
resyncMetaVersionV1 = 1
|
||||
resyncMetaVersion = resyncMetaVersionV1
|
||||
)
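The resync metadata file named by resyncFileName is prefixed with a little-endian format and version header using the constants above, which loadBucketResyncMetadata validates at the end of this section. A minimal sketch of writing and checking such a 4-byte header, assuming the same layout of two uint16 fields:

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	metaFormat  = 1
	metaVersion = 1
)

// encodeHeader prepends a 2-byte format and 2-byte version to the payload.
func encodeHeader(payload []byte) []byte {
	data := make([]byte, 4, 4+len(payload))
	binary.LittleEndian.PutUint16(data[0:2], metaFormat)
	binary.LittleEndian.PutUint16(data[2:4], metaVersion)
	return append(data, payload...)
}

// decodeHeader validates the header and returns the payload.
func decodeHeader(data []byte) ([]byte, error) {
	if len(data) <= 4 {
		return nil, fmt.Errorf("no data after header")
	}
	if f := binary.LittleEndian.Uint16(data[0:2]); f != metaFormat {
		return nil, fmt.Errorf("unknown format: %d", f)
	}
	if v := binary.LittleEndian.Uint16(data[2:4]); v != metaVersion {
		return nil, fmt.Errorf("unknown version: %d", v)
	}
	return data[4:], nil
}

func main() {
	blob := encodeHeader([]byte("msgp payload"))
	payload, err := decodeHeader(blob)
	fmt.Printf("%s %v\n", payload, err) // msgp payload <nil>
}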
|
||||
|
||||
// ResyncStatusType status of resync operation
|
||||
type ResyncStatusType int
|
||||
|
||||
const (
|
||||
// NoResync - no resync in progress
|
||||
NoResync ResyncStatusType = iota
|
||||
// ResyncStarted - resync in progress
|
||||
ResyncStarted
|
||||
// ResyncCompleted - resync finished
|
||||
ResyncCompleted
|
||||
// ResyncFailed - resync failed
|
||||
ResyncFailed
|
||||
)
|
||||
|
||||
func (rt ResyncStatusType) String() string {
|
||||
switch rt {
|
||||
case ResyncStarted:
|
||||
return "Ongoing"
|
||||
case ResyncCompleted:
|
||||
return "Completed"
|
||||
case ResyncFailed:
|
||||
return "Failed"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// TargetReplicationResyncStatus status of resync of bucket for a specific target
|
||||
type TargetReplicationResyncStatus struct {
|
||||
StartTime time.Time `json:"startTime" msg:"st"`
|
||||
EndTime time.Time `json:"endTime" msg:"et"`
|
||||
// Resync ID assigned to this reset
|
||||
ResyncID string `json:"resyncID" msg:"id"`
|
||||
// ResyncBeforeDate - resync all objects created prior to this date
|
||||
ResyncBeforeDate time.Time `json:"resyncBeforeDate" msg:"rdt"`
|
||||
// Status of resync operation
|
||||
ResyncStatus ResyncStatusType `json:"resyncStatus" msg:"rst"`
|
||||
// Failed size in bytes
|
||||
FailedSize int64 `json:"failedReplicationSize" msg:"fs"`
|
||||
// Total number of failed operations
|
||||
FailedCount int64 `json:"failedReplicationCount" msg:"frc"`
|
||||
// Completed size in bytes
|
||||
ReplicatedSize int64 `json:"completedReplicationSize" msg:"rs"`
|
||||
// Total number of objects replicated
|
||||
ReplicatedCount int64 `json:"replicationCount" msg:"rrc"`
|
||||
// Last bucket/object replicated.
|
||||
Bucket string `json:"-" msg:"bkt"`
|
||||
Object string `json:"-" msg:"obj"`
|
||||
}
|
||||
|
||||
// BucketReplicationResyncStatus captures current replication resync status
|
||||
type BucketReplicationResyncStatus struct {
|
||||
Version int `json:"version" msg:"v"`
|
||||
// map of remote arn to their resync status for a bucket
|
||||
TargetsMap map[string]TargetReplicationResyncStatus `json:"resyncMap,omitempty" msg:"brs"`
|
||||
ID int `json:"id" msg:"id"`
|
||||
LastUpdate time.Time `json:"lastUpdate" msg:"lu"`
|
||||
}
|
||||
|
||||
func newBucketResyncStatus(bucket string) BucketReplicationResyncStatus {
|
||||
return BucketReplicationResyncStatus{
|
||||
TargetsMap: make(map[string]TargetReplicationResyncStatus),
|
||||
Version: resyncMetaVersion,
|
||||
}
|
||||
}
|
||||
|
||||
var contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
|
||||
|
||||
// parse size from content-range header
|
||||
func parseSizeFromContentRange(h http.Header) (sz int64, err error) {
|
||||
cr := h.Get(xhttp.ContentRange)
|
||||
if cr == "" {
|
||||
return sz, fmt.Errorf("Content-Range not set")
|
||||
}
|
||||
parts := contentRangeRegexp.FindStringSubmatch(cr)
|
||||
if len(parts) != 4 {
|
||||
return sz, fmt.Errorf("invalid Content-Range header %s", cr)
|
||||
}
|
||||
if parts[3] == "*" {
|
||||
return -1, nil
|
||||
}
|
||||
var usz uint64
|
||||
usz, err = strconv.ParseUint(parts[3], 10, 64)
|
||||
if err != nil {
|
||||
return sz, err
|
||||
}
|
||||
return int64(usz), nil
|
||||
}
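parseSizeFromContentRange lets the proxy recover the full object size from a ranged GET response: a Content-Range header carries the total after the slash, or `*` when it is unknown. A small standalone illustration of the same parsing (totalFromContentRange is an illustrative re-implementation, not the MinIO function):

package main

import (
	"fmt"
	"net/http"
	"regexp"
	"strconv"
)

var contentRange = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)

// totalFromContentRange returns the total object size from
// "Content-Range: bytes first-last/total", or -1 when the total is "*".
func totalFromContentRange(h http.Header) (int64, error) {
	parts := contentRange.FindStringSubmatch(h.Get("Content-Range"))
	if len(parts) != 4 {
		return 0, fmt.Errorf("invalid Content-Range header")
	}
	if parts[3] == "*" {
		return -1, nil
	}
	usz, err := strconv.ParseUint(parts[3], 10, 64)
	return int64(usz), err
}

func main() {
	h := http.Header{}
	h.Set("Content-Range", "bytes 0-511/20480")
	sz, err := totalFromContentRange(h)
	fmt.Println(sz, err) // 20480 <nil>
}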
|
||||
|
||||
File diff suppressed because it is too large
@@ -9,6 +9,119 @@ import (
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
)
|
||||
|
||||
func TestMarshalUnmarshalBucketReplicationResyncStatus(t *testing.T) {
|
||||
v := BucketReplicationResyncStatus{}
|
||||
bts, err := v.MarshalMsg(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
left, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
|
||||
}
|
||||
|
||||
left, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalMsgBucketReplicationResyncStatus(b *testing.B) {
|
||||
v := BucketReplicationResyncStatus{}
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.MarshalMsg(nil)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendMsgBucketReplicationResyncStatus(b *testing.B) {
|
||||
v := BucketReplicationResyncStatus{}
|
||||
bts := make([]byte, 0, v.Msgsize())
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalBucketReplicationResyncStatus(b *testing.B) {
|
||||
v := BucketReplicationResyncStatus{}
|
||||
bts, _ := v.MarshalMsg(nil)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeDecodeBucketReplicationResyncStatus(t *testing.T) {
|
||||
v := BucketReplicationResyncStatus{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
|
||||
m := v.Msgsize()
|
||||
if buf.Len() > m {
|
||||
t.Log("WARNING: TestEncodeDecodeBucketReplicationResyncStatus Msgsize() is inaccurate")
|
||||
}
|
||||
|
||||
vn := BucketReplicationResyncStatus{}
|
||||
err := msgp.Decode(&buf, &vn)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
msgp.Encode(&buf, &v)
|
||||
err = msgp.NewReader(&buf).Skip()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEncodeBucketReplicationResyncStatus(b *testing.B) {
|
||||
v := BucketReplicationResyncStatus{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
en := msgp.NewWriter(msgp.Nowhere)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.EncodeMsg(en)
|
||||
}
|
||||
en.Flush()
|
||||
}
|
||||
|
||||
func BenchmarkDecodeBucketReplicationResyncStatus(b *testing.B) {
|
||||
v := BucketReplicationResyncStatus{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
rd := msgp.NewEndlessReader(buf.Bytes(), b)
|
||||
dc := msgp.NewReader(rd)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := v.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshalReplicateDecision(t *testing.T) {
|
||||
v := ReplicateDecision{}
|
||||
bts, err := v.MarshalMsg(nil)
|
||||
@@ -686,3 +799,116 @@ func BenchmarkDecodeResyncTargetsInfo(b *testing.B) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshalTargetReplicationResyncStatus(t *testing.T) {
|
||||
v := TargetReplicationResyncStatus{}
|
||||
bts, err := v.MarshalMsg(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
left, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
|
||||
}
|
||||
|
||||
left, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalMsgTargetReplicationResyncStatus(b *testing.B) {
|
||||
v := TargetReplicationResyncStatus{}
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.MarshalMsg(nil)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendMsgTargetReplicationResyncStatus(b *testing.B) {
|
||||
v := TargetReplicationResyncStatus{}
|
||||
bts := make([]byte, 0, v.Msgsize())
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalTargetReplicationResyncStatus(b *testing.B) {
|
||||
v := TargetReplicationResyncStatus{}
|
||||
bts, _ := v.MarshalMsg(nil)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeDecodeTargetReplicationResyncStatus(t *testing.T) {
|
||||
v := TargetReplicationResyncStatus{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
|
||||
m := v.Msgsize()
|
||||
if buf.Len() > m {
|
||||
t.Log("WARNING: TestEncodeDecodeTargetReplicationResyncStatus Msgsize() is inaccurate")
|
||||
}
|
||||
|
||||
vn := TargetReplicationResyncStatus{}
|
||||
err := msgp.Decode(&buf, &vn)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
msgp.Encode(&buf, &v)
|
||||
err = msgp.NewReader(&buf).Skip()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEncodeTargetReplicationResyncStatus(b *testing.B) {
|
||||
v := TargetReplicationResyncStatus{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
en := msgp.NewWriter(msgp.Nowhere)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.EncodeMsg(en)
|
||||
}
|
||||
en.Flush()
|
||||
}
|
||||
|
||||
func BenchmarkDecodeTargetReplicationResyncStatus(b *testing.B) {
|
||||
v := TargetReplicationResyncStatus{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
rd := msgp.NewEndlessReader(buf.Bytes(), b)
|
||||
dc := msgp.NewReader(rd)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := v.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ var replicatedInfosTests = []struct {
|
||||
expectedOpType replication.Type
|
||||
expectedAction replicationAction
|
||||
}{
|
||||
{ //1. empty tgtInfos slice
|
||||
{ // 1. empty tgtInfos slice
|
||||
name: "no replicated targets",
|
||||
tgtInfos: []replicatedTargetInfo{},
|
||||
expectedCompletedSize: 0,
|
||||
@@ -41,7 +41,7 @@ var replicatedInfosTests = []struct {
|
||||
expectedOpType: replication.UnsetReplicationType,
|
||||
expectedAction: replicateNone,
|
||||
},
|
||||
{ //2. replication completed to single target
|
||||
{ // 2. replication completed to single target
|
||||
name: "replication completed to single target",
|
||||
tgtInfos: []replicatedTargetInfo{
|
||||
{
|
||||
@@ -59,7 +59,7 @@ var replicatedInfosTests = []struct {
|
||||
expectedOpType: replication.ObjectReplicationType,
|
||||
expectedAction: replicateAll,
|
||||
},
|
||||
{ //3. replication completed to single target; failed to another
|
||||
{ // 3. replication completed to single target; failed to another
|
||||
name: "replication completed to single target",
|
||||
tgtInfos: []replicatedTargetInfo{
|
||||
{
|
||||
@@ -77,14 +77,15 @@ var replicatedInfosTests = []struct {
|
||||
ReplicationStatus: replication.Failed,
|
||||
OpType: replication.ObjectReplicationType,
|
||||
ReplicationAction: replicateAll,
|
||||
}},
|
||||
},
|
||||
},
|
||||
expectedCompletedSize: 249,
|
||||
expectedReplicationStatusInternal: "arn1=COMPLETED;arn2=FAILED;",
|
||||
expectedReplicationStatus: replication.Failed,
|
||||
expectedOpType: replication.ObjectReplicationType,
|
||||
expectedAction: replicateAll,
|
||||
},
|
||||
{ //4. replication pending on one target; failed to another
|
||||
{ // 4. replication pending on one target; failed to another
|
||||
name: "replication completed to single target",
|
||||
tgtInfos: []replicatedTargetInfo{
|
||||
{
|
||||
@@ -102,7 +103,8 @@ var replicatedInfosTests = []struct {
|
||||
ReplicationStatus: replication.Failed,
|
||||
OpType: replication.ObjectReplicationType,
|
||||
ReplicationAction: replicateAll,
|
||||
}},
|
||||
},
|
||||
},
|
||||
expectedCompletedSize: 0,
|
||||
expectedReplicationStatusInternal: "arn1=PENDING;arn2=FAILED;",
|
||||
expectedReplicationStatus: replication.Failed,
|
||||
@@ -137,7 +139,7 @@ var parseReplicationDecisionTest = []struct {
|
||||
expDsc ReplicateDecision
|
||||
expErr error
|
||||
}{
|
||||
{ //1.
|
||||
{ // 1.
|
||||
name: "empty string",
|
||||
dsc: "",
|
||||
expDsc: ReplicateDecision{
|
||||
@@ -146,7 +148,7 @@ var parseReplicationDecisionTest = []struct {
|
||||
expErr: nil,
|
||||
},
|
||||
|
||||
{ //2.
|
||||
{ // 2.
|
||||
name: "replicate decision for one target",
|
||||
dsc: "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id",
|
||||
expErr: nil,
|
||||
@@ -156,7 +158,7 @@ var parseReplicationDecisionTest = []struct {
|
||||
},
|
||||
},
|
||||
},
|
||||
{ //3.
|
||||
{ // 3.
|
||||
name: "replicate decision for multiple targets",
|
||||
dsc: "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id,arn:minio:replication::id2:bucket=false;true;arn:minio:replication::id2:bucket;id2",
|
||||
expErr: nil,
|
||||
@@ -167,7 +169,7 @@ var parseReplicationDecisionTest = []struct {
|
||||
},
|
||||
},
|
||||
},
|
||||
{ //4.
|
||||
{ // 4.
|
||||
name: "invalid format replicate decision for one target",
|
||||
dsc: "arn:minio:replication::id:bucket:true;false;arn:minio:replication::id:bucket;id",
|
||||
expErr: errInvalidReplicateDecisionFormat,
|
||||
@@ -181,9 +183,7 @@ var parseReplicationDecisionTest = []struct {
|
||||
|
||||
func TestParseReplicateDecision(t *testing.T) {
|
||||
for i, test := range parseReplicationDecisionTest {
|
||||
//dsc, err := parseReplicateDecision(test.dsc)
|
||||
dsc, err := parseReplicateDecision(test.expDsc.String())
|
||||
|
||||
if err != nil {
|
||||
if test.expErr != err {
|
||||
t.Errorf("Test%d (%s): Expected parse error got %t , want %t", i+1, test.name, err, test.expErr)
|
||||
@@ -208,22 +208,22 @@ var replicationStateTest = []struct {
|
||||
arn string
|
||||
expStatus replication.StatusType
|
||||
}{
|
||||
{ //1. no replication status header
|
||||
{ // 1. no replication status header
|
||||
name: "no replicated targets",
|
||||
rs: ReplicationState{},
|
||||
expStatus: replication.StatusType(""),
|
||||
},
|
||||
{ //2. replication status for one target
|
||||
{ // 2. replication status for one target
|
||||
name: "replication status for one target",
|
||||
rs: ReplicationState{ReplicationStatusInternal: "arn1=PENDING;", Targets: map[string]replication.StatusType{"arn1": "PENDING"}},
|
||||
expStatus: replication.Pending,
|
||||
},
|
||||
{ //3. replication status for one target - incorrect format
|
||||
{ // 3. replication status for one target - incorrect format
|
||||
name: "replication status for one target",
|
||||
rs: ReplicationState{ReplicationStatusInternal: "arn1=PENDING"},
|
||||
expStatus: replication.StatusType(""),
|
||||
},
|
||||
{ //4. replication status for 3 targets, one of them failed
|
||||
{ // 4. replication status for 3 targets, one of them failed
|
||||
name: "replication status for 3 targets - one failed",
|
||||
rs: ReplicationState{
|
||||
ReplicationStatusInternal: "arn1=COMPLETED;arn2=COMPLETED;arn3=FAILED;",
|
||||
@@ -231,7 +231,7 @@ var replicationStateTest = []struct {
|
||||
},
|
||||
expStatus: replication.Failed,
|
||||
},
|
||||
{ //5. replication status for replica version
|
||||
{ // 5. replication status for replica version
|
||||
name: "replication status for replica version",
|
||||
rs: ReplicationState{ReplicationStatusInternal: string(replication.Replica)},
|
||||
expStatus: replication.Replica,
|
||||
|
||||
@@ -19,14 +19,19 @@ package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net/http"
|
||||
"path"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/dustin/go-humanize"
|
||||
"github.com/minio/madmin-go"
|
||||
"github.com/minio/minio-go/v7"
|
||||
miniogo "github.com/minio/minio-go/v7"
|
||||
@@ -61,6 +66,8 @@ const (
|
||||
ObjectLockRetentionTimestamp = "objectlock-retention-timestamp"
|
||||
// ObjectLockLegalHoldTimestamp - the last time a legal hold metadata modification happened on this cluster for this object version
|
||||
ObjectLockLegalHoldTimestamp = "objectlock-legalhold-timestamp"
|
||||
// ReplicationWorkerMultiplier is suggested worker multiplier if traffic exceeds replication worker capacity
|
||||
ReplicationWorkerMultiplier = 1.5
|
||||
)
|
||||
|
||||
// gets replication config associated to a given bucket name.
|
||||
@@ -136,6 +143,7 @@ func (o mustReplicateOptions) ReplicationStatus() (s replication.StatusType) {
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (o mustReplicateOptions) isExistingObjectReplication() bool {
|
||||
return o.opType == replication.ExistingObjectReplicationType
|
||||
}
|
||||
@@ -143,6 +151,7 @@ func (o mustReplicateOptions) isExistingObjectReplication() bool {
|
||||
func (o mustReplicateOptions) isMetadataReplication() bool {
|
||||
return o.opType == replication.MetadataReplicationType
|
||||
}
|
||||
|
||||
func getMustReplicateOptions(o ObjectInfo, op replication.Type, opts ObjectOptions) mustReplicateOptions {
|
||||
if !op.Valid() {
|
||||
op = replication.ObjectReplicationType
|
||||
@@ -433,12 +442,12 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
|
||||
// to decrement pending count later.
|
||||
for _, rinfo := range rinfos.Targets {
|
||||
if rinfo.ReplicationStatus != rinfo.PrevReplicationStatus {
|
||||
globalReplicationStats.Update(dobj.Bucket, rinfo.Arn, 0, replicationStatus,
|
||||
globalReplicationStats.Update(dobj.Bucket, rinfo.Arn, 0, 0, replicationStatus,
|
||||
prevStatus, replication.DeleteReplicationType)
|
||||
}
|
||||
}
|
||||
|
||||
var eventName = event.ObjectReplicationComplete
|
||||
eventName := event.ObjectReplicationComplete
|
||||
if replicationStatus == replication.Failed {
|
||||
eventName = event.ObjectReplicationFailed
|
||||
}
|
||||
@@ -514,19 +523,18 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
|
||||
}
|
||||
return
|
||||
}
|
||||
// early return if already replicated delete marker for existing object replication
|
||||
if dobj.DeleteMarkerVersionID != "" && dobj.OpType == replication.ExistingObjectReplicationType {
|
||||
// early return if already replicated delete marker for existing object replication/ healing delete markers
|
||||
if dobj.DeleteMarkerVersionID != "" && (dobj.OpType == replication.ExistingObjectReplicationType || dobj.OpType == replication.HealReplicationType) {
|
||||
if _, err := tgt.StatObject(ctx, tgt.Bucket, dobj.ObjectName, miniogo.StatObjectOptions{
|
||||
VersionID: versionID,
|
||||
Internal: miniogo.AdvancedGetOptions{
|
||||
ReplicationProxyRequest: "false",
|
||||
}}); isErrMethodNotAllowed(ErrorRespToObjectError(err, dobj.Bucket, dobj.ObjectName)) {
|
||||
},
|
||||
}); isErrMethodNotAllowed(ErrorRespToObjectError(err, dobj.Bucket, dobj.ObjectName)) {
|
||||
if dobj.VersionID == "" {
|
||||
rinfo.ReplicationStatus = replication.Completed
|
||||
} else {
|
||||
rinfo.VersionPurgeStatus = Complete
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -724,7 +732,7 @@ const (
// matches k1 with all keys, returns 'true' if one of them matches
func equals(k1 string, keys ...string) bool {
	for _, k2 := range keys {
		if strings.EqualFold(strings.ToLower(k1), strings.ToLower(k2)) {
		if strings.EqualFold(k1, k2) {
			return true
		}
	}
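The change to equals drops the redundant strings.ToLower calls: strings.EqualFold already compares under Unicode case-folding, so lower-casing first only adds allocations. A quick illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// EqualFold is case-insensitive on its own; no ToLower needed.
	fmt.Println(strings.EqualFold("X-Amz-Meta-Key", "x-amz-meta-key")) // true
	fmt.Println(strings.EqualFold("Content-Type", "content-length"))   // false
}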
|
||||
@@ -765,7 +773,7 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo, opType replicati
|
||||
}
|
||||
|
||||
t, _ := tags.ParseObjectTags(oi1.UserTags)
|
||||
if !reflect.DeepEqual(oi2.UserTags, t.ToMap()) {
|
||||
if !reflect.DeepEqual(oi2.UserTags, t.ToMap()) || (oi2.UserTagCount != len(t.ToMap())) {
|
||||
return replicateMetadata
|
||||
}
|
||||
|
||||
@@ -856,8 +864,9 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
|
||||
return
|
||||
}
|
||||
tgtArns := cfg.FilterTargetArns(replication.ObjectOpts{
|
||||
Name: object,
|
||||
SSEC: crypto.SSEC.IsEncrypted(objInfo.UserDefined),
|
||||
Name: object,
|
||||
SSEC: crypto.SSEC.IsEncrypted(objInfo.UserDefined),
|
||||
UserTags: objInfo.UserTags,
|
||||
})
|
||||
// Lock the object name before starting replication.
|
||||
// Use separate lock that doesn't collide with regular objects.
|
||||
@@ -901,7 +910,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
|
||||
// FIXME: add support for missing replication events
|
||||
// - event.ObjectReplicationMissedThreshold
|
||||
// - event.ObjectReplicationReplicatedAfterThreshold
|
||||
var eventName = event.ObjectReplicationComplete
|
||||
eventName := event.ObjectReplicationComplete
|
||||
if rinfos.ReplicationStatus() == replication.Failed {
|
||||
eventName = event.ObjectReplicationFailed
|
||||
}
|
||||
@@ -910,24 +919,24 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
|
||||
// metadata should be updated with last resync timestamp.
|
||||
if objInfo.ReplicationStatusInternal != newReplStatusInternal || rinfos.ReplicationResynced() {
|
||||
popts := ObjectOptions{
|
||||
MTime: objInfo.ModTime,
|
||||
VersionID: objInfo.VersionID,
|
||||
UserDefined: make(map[string]string, len(objInfo.UserDefined)),
|
||||
}
|
||||
for k, v := range objInfo.UserDefined {
|
||||
popts.UserDefined[k] = v
|
||||
}
|
||||
popts.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus] = newReplStatusInternal
|
||||
popts.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
|
||||
popts.UserDefined[xhttp.AmzBucketReplicationStatus] = string(rinfos.ReplicationStatus())
|
||||
for _, rinfo := range rinfos.Targets {
|
||||
if rinfo.ResyncTimestamp != "" {
|
||||
popts.UserDefined[targetResetHeader(rinfo.Arn)] = rinfo.ResyncTimestamp
|
||||
}
|
||||
}
|
||||
if objInfo.UserTags != "" {
|
||||
popts.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
|
||||
MTime: objInfo.ModTime,
|
||||
VersionID: objInfo.VersionID,
|
||||
EvalMetadataFn: func(oi ObjectInfo) error {
|
||||
oi.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus] = newReplStatusInternal
|
||||
oi.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
|
||||
oi.UserDefined[xhttp.AmzBucketReplicationStatus] = string(rinfos.ReplicationStatus())
|
||||
for _, rinfo := range rinfos.Targets {
|
||||
if rinfo.ResyncTimestamp != "" {
|
||||
oi.UserDefined[targetResetHeader(rinfo.Arn)] = rinfo.ResyncTimestamp
|
||||
}
|
||||
}
|
||||
if objInfo.UserTags != "" {
|
||||
oi.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
if _, err = objectAPI.PutObjectMetadata(ctx, bucket, object, popts); err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w",
|
||||
bucket, objInfo.Name, objInfo.VersionID, err))
|
||||
@@ -938,7 +947,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
|
||||
}
|
||||
for _, rinfo := range rinfos.Targets {
|
||||
if rinfo.ReplicationStatus != rinfo.PrevReplicationStatus {
|
||||
globalReplicationStats.Update(bucket, rinfo.Arn, rinfo.Size, rinfo.ReplicationStatus, rinfo.PrevReplicationStatus, opType)
|
||||
globalReplicationStats.Update(bucket, rinfo.Arn, rinfo.Size, rinfo.Duration, rinfo.ReplicationStatus, rinfo.PrevReplicationStatus, opType)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -963,6 +972,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
|
||||
// replicateObjectToTarget replicates the specified version of the object to destination bucket
|
||||
// The source object is then updated to reflect the replication status.
|
||||
func replicateObjectToTarget(ctx context.Context, ri ReplicateObjectInfo, objectAPI ObjectLayer, tgt *TargetClient) (rinfo replicatedTargetInfo) {
|
||||
startTime := time.Now()
|
||||
objInfo := ri.ObjectInfo.Clone()
|
||||
bucket := objInfo.Bucket
|
||||
object := objInfo.Name
|
||||
@@ -991,6 +1001,7 @@ func replicateObjectToTarget(ctx context.Context, ri ReplicateObjectInfo, object
|
||||
OpType: ri.OpType,
|
||||
ReplicationAction: rAction,
|
||||
}
|
||||
|
||||
if ri.ObjectInfo.TargetReplicationStatus(tgt.ARN) == replication.Completed && !ri.ExistingObjResync.Empty() && !ri.ExistingObjResync.mustResyncTarget(tgt.ARN) {
|
||||
rinfo.ReplicationStatus = replication.Completed
|
||||
rinfo.ReplicationResynced = true
|
||||
@@ -1050,13 +1061,21 @@ func replicateObjectToTarget(ctx context.Context, ri ReplicateObjectInfo, object
|
||||
})
|
||||
return rinfo
|
||||
}
|
||||
defer func() {
|
||||
if rinfo.ReplicationStatus == replication.Completed && ri.OpType == replication.ExistingObjectReplicationType && tgt.ResetID != "" {
|
||||
rinfo.ResyncTimestamp = fmt.Sprintf("%s;%s", UTCNow().Format(http.TimeFormat), tgt.ResetID)
|
||||
rinfo.ReplicationResynced = true
|
||||
}
|
||||
rinfo.Duration = time.Since(startTime)
|
||||
}()
|
||||
|
||||
rAction = replicateAll
|
||||
oi, cerr := tgt.StatObject(ctx, tgt.Bucket, object, miniogo.StatObjectOptions{
|
||||
VersionID: objInfo.VersionID,
|
||||
Internal: miniogo.AdvancedGetOptions{
|
||||
ReplicationProxyRequest: "false",
|
||||
}})
|
||||
},
|
||||
})
|
||||
if cerr == nil {
|
||||
rAction = getReplicationAction(objInfo, oi, ri.OpType)
|
||||
rinfo.ReplicationStatus = replication.Completed
|
||||
@@ -1081,13 +1100,8 @@ func replicateObjectToTarget(ctx context.Context, ri ReplicateObjectInfo, object
|
||||
// Note: Replication Stats would have been updated despite metadata update failure.
|
||||
gr.Close()
|
||||
closeOnDefer = false
|
||||
return replicatedTargetInfo{
|
||||
ReplicationStatus: replication.Completed,
|
||||
Size: sz,
|
||||
Arn: tgt.ARN,
|
||||
ReplicationAction: rAction,
|
||||
PrevReplicationStatus: objInfo.TargetReplicationStatus(tgt.ARN),
|
||||
}
|
||||
rinfo.ReplicationAction = rAction
|
||||
rinfo.ReplicationStatus = replication.Completed
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -1095,12 +1109,6 @@ func replicateObjectToTarget(ctx context.Context, ri ReplicateObjectInfo, object
|
||||
rinfo.ReplicationStatus = replication.Completed
|
||||
rinfo.Size = size
|
||||
rinfo.ReplicationAction = rAction
|
||||
defer func() {
|
||||
if rinfo.ReplicationStatus == replication.Completed && ri.OpType == replication.ExistingObjectReplicationType && tgt.ResetID != "" {
|
||||
rinfo.ResyncTimestamp = fmt.Sprintf("%s;%s", UTCNow().Format(http.TimeFormat), tgt.ResetID)
|
||||
rinfo.ReplicationResynced = true
|
||||
}
|
||||
}()
|
||||
// use core client to avoid doing multipart on PUT
|
||||
c := &miniogo.Core{Client: tgt.Client}
|
||||
if rAction != replicateAll {
|
||||
@@ -1114,7 +1122,8 @@ func replicateObjectToTarget(ctx context.Context, ri ReplicateObjectInfo, object
|
||||
Internal: miniogo.AdvancedPutOptions{
|
||||
SourceVersionID: objInfo.VersionID,
|
||||
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
|
||||
}}
|
||||
},
|
||||
}
|
||||
if _, err = c.CopyObject(ctx, tgt.Bucket, object, tgt.Bucket, object, getCopyObjMetadata(objInfo, tgt.StorageClass), srcOpts, dstOpts); err != nil {
|
||||
rinfo.ReplicationStatus = replication.Failed
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to replicate metadata for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err))
|
||||
@@ -1176,9 +1185,17 @@ func replicateObjectWithMultipart(ctx context.Context, c *miniogo.Core, bucket,
	defer func() {
		if err != nil {
			// block and abort remote upload upon failure.
			if aerr := c.AbortMultipartUpload(ctx, bucket, object, uploadID); aerr != nil {
				aerr = fmt.Errorf("Unable to cleanup failed multipart replication %s on remote %s/%s: %w", uploadID, bucket, object, aerr)
				logger.LogIf(ctx, aerr)
			attempts := 1
			for attempts <= 3 {
				aerr := c.AbortMultipartUpload(ctx, bucket, object, uploadID)
				if aerr == nil {
					return
				}
				logger.LogIf(ctx,
					fmt.Errorf("Trying %s: Unable to cleanup failed multipart replication %s on remote %s/%s: %w - this may consume space on remote cluster",
						humanize.Ordinal(attempts), uploadID, bucket, object, aerr))
				attempts++
				time.Sleep(time.Second)
			}
		}
	}()
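The cleanup above now retries AbortMultipartUpload a bounded number of times instead of giving up after one attempt, logging each failure because an orphaned upload keeps consuming space on the remote cluster. The same bounded-retry shape as a generic sketch; the operation, limits, and helper name here are illustrative:

package main

import (
	"errors"
	"fmt"
	"time"
)

// retry runs op up to maxAttempts times, pausing between attempts,
// and reports every intermediate failure to onErr.
func retry(maxAttempts int, pause time.Duration, op func() error, onErr func(attempt int, err error)) error {
	var err error
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		if err = op(); err == nil {
			return nil
		}
		onErr(attempt, err)
		time.Sleep(pause)
	}
	return err
}

func main() {
	calls := 0
	err := retry(3, 10*time.Millisecond, func() error {
		calls++
		if calls < 2 {
			return errors.New("transient")
		}
		return nil
	}, func(attempt int, err error) {
		fmt.Printf("attempt %d failed: %v\n", attempt, err)
	})
	fmt.Println("final:", err) // final: <nil>
}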
|
||||
@@ -1210,7 +1227,8 @@ func replicateObjectWithMultipart(ctx context.Context, c *miniogo.Core, bucket,
|
||||
SourceMTime: objInfo.ModTime,
|
||||
// always set this to distinguish between `mc mirror` replication and serverside
|
||||
ReplicationRequest: true,
|
||||
}})
|
||||
},
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1289,6 +1307,7 @@ type ReplicationPool struct {
|
||||
existingReplicaDeleteCh chan DeletedObjectReplicationInfo
|
||||
workerSize int
|
||||
mrfWorkerSize int
|
||||
resyncState replicationResyncState
|
||||
workerWg sync.WaitGroup
|
||||
mrfWorkerWg sync.WaitGroup
|
||||
once sync.Once
|
||||
@@ -1305,6 +1324,7 @@ func NewReplicationPool(ctx context.Context, o ObjectLayer, opts replicationPool
|
||||
mrfWorkerKillCh: make(chan struct{}, opts.FailedWorkers),
|
||||
existingReplicaCh: make(chan ReplicateObjectInfo, 100000),
|
||||
existingReplicaDeleteCh: make(chan DeletedObjectReplicationInfo, 100000),
|
||||
resyncState: replicationResyncState{statusMap: make(map[string]BucketReplicationResyncStatus)},
|
||||
ctx: ctx,
|
||||
objLayer: o,
|
||||
}
|
||||
@@ -1312,6 +1332,7 @@ func NewReplicationPool(ctx context.Context, o ObjectLayer, opts replicationPool
|
||||
pool.ResizeWorkers(opts.Workers)
|
||||
pool.ResizeFailedWorkers(opts.FailedWorkers)
|
||||
go pool.AddExistingObjectReplicateWorker()
|
||||
go pool.periodicResyncMetaSave(ctx, o)
|
||||
return pool
|
||||
}
|
||||
|
||||
@@ -1354,7 +1375,6 @@ func (p *ReplicationPool) AddWorker() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// AddExistingObjectReplicateWorker adds a worker to queue existing objects that need to be sync'd
|
||||
@@ -1409,6 +1429,14 @@ func (p *ReplicationPool) ResizeFailedWorkers(n int) {
|
||||
}
|
||||
}
|
||||
|
||||
// suggestedWorkers recommends an increase in number of workers to meet replication load.
func (p *ReplicationPool) suggestedWorkers(failQueue bool) int {
	if failQueue {
		return int(float64(p.mrfWorkerSize) * ReplicationWorkerMultiplier)
	}
	return int(float64(p.workerSize) * ReplicationWorkerMultiplier)
}
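Concretely, with the ReplicationWorkerMultiplier of 1.5 defined earlier, a pool running 100 replication workers yields a suggestion of int(float64(100) * 1.5) = 150, which is the number quoted in the overload warnings logged below.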
|
||||
|
||||
func (p *ReplicationPool) queueReplicaFailedTask(ri ReplicateObjectInfo) {
|
||||
if p == nil {
|
||||
return
|
||||
@@ -1422,6 +1450,7 @@ func (p *ReplicationPool) queueReplicaFailedTask(ri ReplicateObjectInfo) {
|
||||
})
|
||||
case p.mrfReplicaCh <- ri:
|
||||
default:
|
||||
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Replication failed workers could not keep up with healing failures - consider increasing number of replication failed workers with `mc admin config set api replication_failed_workers=%d`", p.suggestedWorkers(true)), replicationSubsystem)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1447,6 +1476,7 @@ func (p *ReplicationPool) queueReplicaTask(ri ReplicateObjectInfo) {
|
||||
})
|
||||
case ch <- ri:
|
||||
default:
|
||||
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Replication workers could not keep up with incoming traffic - consider increasing number of replication workers with `mc admin config set api replication_workers=%d`", p.suggestedWorkers(false)), replicationSubsystem)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1483,6 +1513,7 @@ func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectReplicationInf
|
||||
})
|
||||
case ch <- doi:
|
||||
default:
|
||||
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Replication workers could not keep up with incoming traffic - consider increasing number of replication workers with `mc admin config set api replication_workers=%d`", p.suggestedWorkers(false)), replicationSubsystem)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1497,18 +1528,24 @@ func initBackgroundReplication(ctx context.Context, objectAPI ObjectLayer) {
|
||||
FailedWorkers: globalAPIConfig.getReplicationFailedWorkers(),
|
||||
})
|
||||
globalReplicationStats = NewReplicationStats(ctx, objectAPI)
|
||||
go globalReplicationStats.loadInitialReplicationMetrics(ctx)
|
||||
}
|
||||
|
||||
type proxyResult struct {
|
||||
Proxy bool
|
||||
Err error
|
||||
}
|
||||
|
||||
// get Reader from replication target if active-active replication is in place and
|
||||
// this node returns a 404
|
||||
func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions, proxyTargets *madmin.BucketTargets) (gr *GetObjectReader, proxy bool) {
|
||||
tgt, oi, proxy := proxyHeadToRepTarget(ctx, bucket, object, opts, proxyTargets)
|
||||
if !proxy {
|
||||
return nil, false
|
||||
func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions, proxyTargets *madmin.BucketTargets) (gr *GetObjectReader, proxy proxyResult, err error) {
|
||||
tgt, oi, proxy := proxyHeadToRepTarget(ctx, bucket, object, rs, opts, proxyTargets)
|
||||
if !proxy.Proxy {
|
||||
return nil, proxy, nil
|
||||
}
|
||||
fn, off, length, err := NewGetObjectReader(rs, oi, opts)
|
||||
fn, _, _, err := NewGetObjectReader(nil, oi, opts)
|
||||
if err != nil {
|
||||
return nil, false
|
||||
return nil, proxy, err
|
||||
}
|
||||
gopts := miniogo.GetObjectOptions{
|
||||
VersionID: opts.VersionID,
|
||||
@@ -1516,30 +1553,40 @@ func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs
|
||||
Internal: miniogo.AdvancedGetOptions{
|
||||
ReplicationProxyRequest: "true",
|
||||
},
|
||||
PartNumber: opts.PartNumber,
|
||||
}
|
||||
// get correct offsets for encrypted object
|
||||
if off >= 0 && length >= 0 {
|
||||
if err := gopts.SetRange(off, off+length-1); err != nil {
|
||||
return nil, false
|
||||
if rs != nil {
|
||||
h, err := rs.ToHeader()
|
||||
if err != nil {
|
||||
return nil, proxy, err
|
||||
}
|
||||
gopts.Set(xhttp.Range, h)
|
||||
}
|
||||
// Make sure to match ETag when proxying.
|
||||
if err = gopts.SetMatchETag(oi.ETag); err != nil {
|
||||
return nil, false
|
||||
return nil, proxy, err
|
||||
}
|
||||
c := miniogo.Core{Client: tgt.Client}
|
||||
obj, _, _, err := c.GetObject(ctx, bucket, object, gopts)
|
||||
obj, _, h, err := c.GetObject(ctx, bucket, object, gopts)
|
||||
if err != nil {
|
||||
return nil, false
|
||||
return nil, proxy, err
|
||||
}
|
||||
closeReader := func() { obj.Close() }
|
||||
|
||||
reader, err := fn(obj, h, closeReader)
|
||||
if err != nil {
|
||||
return nil, false
|
||||
return nil, proxy, err
|
||||
}
|
||||
reader.ObjInfo = oi.Clone()
|
||||
return reader, true
|
||||
if rs != nil {
|
||||
contentSize, err := parseSizeFromContentRange(h)
|
||||
if err != nil {
|
||||
return nil, proxy, err
|
||||
}
|
||||
reader.ObjInfo.Size = contentSize
|
||||
}
|
||||
|
||||
return reader, proxyResult{Proxy: true}, nil
|
||||
}
|
||||
|
||||
func getproxyTargets(ctx context.Context, bucket, object string, opts ObjectOptions) (tgts *madmin.BucketTargets) {
|
||||
@@ -1558,11 +1605,11 @@ func getproxyTargets(ctx context.Context, bucket, object string, opts ObjectOpti
|
||||
return tgts
|
||||
}
|
||||
|
||||
func proxyHeadToRepTarget(ctx context.Context, bucket, object string, opts ObjectOptions, proxyTargets *madmin.BucketTargets) (tgt *TargetClient, oi ObjectInfo, proxy bool) {
|
||||
func proxyHeadToRepTarget(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, opts ObjectOptions, proxyTargets *madmin.BucketTargets) (tgt *TargetClient, oi ObjectInfo, proxy proxyResult) {
|
||||
// this option is set when active-active replication is in place between site A -> B,
|
||||
// and site B does not have the object yet.
|
||||
if opts.ProxyRequest || (opts.ProxyHeaderSet && !opts.ProxyRequest) { // true only when site B sets MinIOSourceProxyRequest header
|
||||
return nil, oi, false
|
||||
return nil, oi, proxy
|
||||
}
|
||||
for _, t := range proxyTargets.Targets {
|
||||
tgt = globalBucketTargetSys.GetRemoteTargetClient(ctx, t.Arn)
|
||||
@@ -1580,9 +1627,22 @@ func proxyHeadToRepTarget(ctx context.Context, bucket, object string, opts Objec
|
||||
Internal: miniogo.AdvancedGetOptions{
|
||||
ReplicationProxyRequest: "true",
|
||||
},
|
||||
PartNumber: opts.PartNumber,
|
||||
}
|
||||
if rs != nil {
|
||||
h, err := rs.ToHeader()
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Invalid range header for %s/%s(%s) - %w", bucket, object, opts.VersionID, err))
|
||||
continue
|
||||
}
|
||||
gopts.Set(xhttp.Range, h)
|
||||
}
|
||||
|
||||
objInfo, err := tgt.StatObject(ctx, t.TargetBucket, object, gopts)
|
||||
if err != nil {
|
||||
if isErrInvalidRange(ErrorRespToObjectError(err, bucket, object)) {
|
||||
return nil, oi, proxyResult{Err: err}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -1613,15 +1673,15 @@ func proxyHeadToRepTarget(ctx context.Context, bucket, object string, opts Objec
|
||||
if ok {
|
||||
oi.ContentEncoding = ce
|
||||
}
|
||||
return tgt, oi, true
|
||||
return tgt, oi, proxyResult{Proxy: true}
|
||||
}
|
||||
return nil, oi, false
|
||||
return nil, oi, proxy
|
||||
}
|
||||
|
||||
// get object info from replication target if active-active replication is in place and
|
||||
// this node returns a 404
|
||||
func proxyHeadToReplicationTarget(ctx context.Context, bucket, object string, opts ObjectOptions, proxyTargets *madmin.BucketTargets) (oi ObjectInfo, proxy bool) {
|
||||
_, oi, proxy = proxyHeadToRepTarget(ctx, bucket, object, opts, proxyTargets)
|
||||
func proxyHeadToReplicationTarget(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, opts ObjectOptions, proxyTargets *madmin.BucketTargets) (oi ObjectInfo, proxy proxyResult) {
|
||||
_, oi, proxy = proxyHeadToRepTarget(ctx, bucket, object, rs, opts, proxyTargets)
|
||||
return oi, proxy
|
||||
}
|
||||
|
||||
@@ -1633,7 +1693,7 @@ func scheduleReplication(ctx context.Context, objInfo ObjectInfo, o ObjectLayer,
|
||||
}
|
||||
if sz, err := objInfo.GetActualSize(); err == nil {
|
||||
for arn := range dsc.targetsMap {
|
||||
globalReplicationStats.Update(objInfo.Bucket, arn, sz, objInfo.ReplicationStatus, replication.StatusType(""), opType)
|
||||
globalReplicationStats.Update(objInfo.Bucket, arn, sz, 0, objInfo.ReplicationStatus, replication.StatusType(""), opType)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1641,10 +1701,10 @@ func scheduleReplication(ctx context.Context, objInfo ObjectInfo, o ObjectLayer,
|
||||
func scheduleReplicationDelete(ctx context.Context, dv DeletedObjectReplicationInfo, o ObjectLayer) {
|
||||
globalReplicationPool.queueReplicaDeleteTask(dv)
|
||||
for arn := range dv.ReplicationState.Targets {
|
||||
globalReplicationStats.Update(dv.Bucket, arn, 0, replication.Pending, replication.StatusType(""), replication.DeleteReplicationType)
|
||||
globalReplicationStats.Update(dv.Bucket, arn, 0, 0, replication.Pending, replication.StatusType(""), replication.DeleteReplicationType)
|
||||
}
|
||||
for arn := range dv.ReplicationState.PurgeTargets {
|
||||
globalReplicationStats.Update(dv.Bucket, arn, 0, replication.Pending, replication.StatusType(""), replication.DeleteReplicationType)
|
||||
globalReplicationStats.Update(dv.Bucket, arn, 0, 0, replication.Pending, replication.StatusType(""), replication.DeleteReplicationType)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1656,6 +1716,7 @@ type replicationConfig struct {
|
||||
func (c replicationConfig) Empty() bool {
|
||||
return c.Config == nil
|
||||
}
|
||||
|
||||
func (c replicationConfig) Replicate(opts replication.ObjectOpts) bool {
|
||||
return c.Config.Replicate(opts)
|
||||
}
|
||||
@@ -1679,7 +1740,8 @@ func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc *Repli
|
||||
DeleteMarker: oi.DeleteMarker,
|
||||
VersionID: oi.VersionID,
|
||||
OpType: replication.DeleteReplicationType,
|
||||
ExistingObject: true}
|
||||
ExistingObject: true,
|
||||
}
|
||||
|
||||
tgtArns := c.Config.FilterTargetArns(opts)
|
||||
// indicates no matching target with Existing object replication enabled.
|
||||
@@ -1739,7 +1801,7 @@ func resyncTarget(oi ObjectInfo, arn string, resetID string, resetBeforeDate tim
|
||||
}
|
||||
rs, ok := oi.UserDefined[targetResetHeader(arn)]
|
||||
if !ok {
|
||||
rs, ok = oi.UserDefined[xhttp.MinIOReplicationResetStatus] //for backward compatibility
|
||||
rs, ok = oi.UserDefined[xhttp.MinIOReplicationResetStatus] // for backward compatibility
|
||||
}
|
||||
if !ok { // existing object replication is enabled and object version is unreplicated so far.
|
||||
if resetID != "" && oi.ModTime.Before(resetBeforeDate) { // trigger replication if `mc replicate reset` requested
|
||||
@@ -1767,3 +1829,404 @@ func resyncTarget(oi ObjectInfo, arn string, resetID string, resetBeforeDate tim
|
||||
rd.Replicate = newReset && oi.ModTime.Before(resetBeforeDate)
|
||||
return
|
||||
}
|
||||
|
||||
// get the most current of in-memory replication stats and data usage info from crawler.
|
||||
func getLatestReplicationStats(bucket string, u BucketUsageInfo) (s BucketReplicationStats) {
|
||||
bucketStats := globalNotificationSys.GetClusterBucketStats(GlobalContext, bucket)
|
||||
// accumulate cluster bucket stats
|
||||
stats := make(map[string]*BucketReplicationStat)
|
||||
var totReplicaSize int64
|
||||
for _, bucketStat := range bucketStats {
|
||||
totReplicaSize += bucketStat.ReplicationStats.ReplicaSize
|
||||
for arn, stat := range bucketStat.ReplicationStats.Stats {
|
||||
oldst := stats[arn]
|
||||
if oldst == nil {
|
||||
oldst = &BucketReplicationStat{}
|
||||
}
|
||||
stats[arn] = &BucketReplicationStat{
|
||||
FailedCount: stat.FailedCount + oldst.FailedCount,
|
||||
FailedSize: stat.FailedSize + oldst.FailedSize,
|
||||
ReplicatedSize: stat.ReplicatedSize + oldst.ReplicatedSize,
|
||||
Latency: stat.Latency.merge(oldst.Latency),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// add initial usage stat to cluster stats
|
||||
usageStat := globalReplicationStats.GetInitialUsage(bucket)
|
||||
totReplicaSize += usageStat.ReplicaSize
|
||||
for arn, stat := range usageStat.Stats {
|
||||
st, ok := stats[arn]
|
||||
if !ok {
|
||||
st = &BucketReplicationStat{}
|
||||
stats[arn] = st
|
||||
}
|
||||
st.ReplicatedSize += stat.ReplicatedSize
|
||||
st.FailedSize += stat.FailedSize
|
||||
st.FailedCount += stat.FailedCount
|
||||
}
|
||||
s = BucketReplicationStats{
|
||||
Stats: make(map[string]*BucketReplicationStat, len(stats)),
|
||||
}
|
||||
var latestTotReplicatedSize int64
|
||||
for _, st := range u.ReplicationInfo {
|
||||
latestTotReplicatedSize += int64(st.ReplicatedSize)
|
||||
}
|
||||
// normalize computed real time stats with latest usage stat
|
||||
for arn, tgtstat := range stats {
|
||||
st := BucketReplicationStat{}
|
||||
bu, ok := u.ReplicationInfo[arn]
|
||||
if !ok {
|
||||
bu = BucketTargetUsageInfo{}
|
||||
}
|
||||
// use in memory replication stats if it is ahead of usage info.
|
||||
st.ReplicatedSize = int64(bu.ReplicatedSize)
|
||||
if tgtstat.ReplicatedSize >= int64(bu.ReplicatedSize) {
|
||||
st.ReplicatedSize = tgtstat.ReplicatedSize
|
||||
}
|
||||
s.ReplicatedSize += st.ReplicatedSize
|
||||
// Reset FailedSize and FailedCount to 0 for negative overflows which can
|
||||
// happen since data usage picture can lag behind actual usage state at the time of cluster start
|
||||
st.FailedSize = int64(math.Max(float64(tgtstat.FailedSize), 0))
|
||||
st.FailedCount = int64(math.Max(float64(tgtstat.FailedCount), 0))
|
||||
st.Latency = tgtstat.Latency
|
||||
|
||||
s.Stats[arn] = &st
|
||||
s.FailedSize += st.FailedSize
|
||||
s.FailedCount += st.FailedCount
|
||||
}
|
||||
// normalize overall stats
|
||||
s.ReplicaSize = int64(math.Max(float64(totReplicaSize), float64(u.ReplicaSize)))
|
||||
s.ReplicatedSize = int64(math.Max(float64(s.ReplicatedSize), float64(latestTotReplicatedSize)))
|
||||
return s
|
||||
}
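getLatestReplicationStats has to reconcile three sources: live per-node counters from the peer notification system, the usage snapshot captured at startup, and the crawler's latest BucketUsageInfo, preferring the in-memory figure wherever it can run ahead of the lagging usage picture. A reduced sketch of the accumulate-then-normalize step, with stand-in types rather than the MinIO structures:

package main

import "fmt"

type stat struct {
	ReplicatedSize int64
	FailedCount    int64
}

// accumulate sums per-ARN stats reported by each node in the cluster.
func accumulate(perNode []map[string]stat) map[string]stat {
	total := make(map[string]stat)
	for _, node := range perNode {
		for arn, s := range node {
			t := total[arn]
			t.ReplicatedSize += s.ReplicatedSize
			t.FailedCount += s.FailedCount
			total[arn] = t
		}
	}
	return total
}

// normalize prefers the in-memory figure when it is ahead of the
// (possibly stale) crawler snapshot, and clamps negatives to zero.
func normalize(live stat, snapshot int64) stat {
	if live.ReplicatedSize < snapshot {
		live.ReplicatedSize = snapshot
	}
	if live.FailedCount < 0 {
		live.FailedCount = 0
	}
	return live
}

func main() {
	nodes := []map[string]stat{
		{"arn1": {ReplicatedSize: 100, FailedCount: 1}},
		{"arn1": {ReplicatedSize: 50}},
	}
	total := accumulate(nodes)
	fmt.Println(normalize(total["arn1"], 200)) // {200 1}
}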
|
||||
|
||||
const resyncTimeInterval = time.Minute * 10
|
||||
|
||||
// periodicResyncMetaSave saves in-memory resync meta stats to disk in periodic intervals
|
||||
func (p *ReplicationPool) periodicResyncMetaSave(ctx context.Context, objectAPI ObjectLayer) {
|
||||
resyncTimer := time.NewTimer(resyncTimeInterval)
|
||||
defer resyncTimer.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-resyncTimer.C:
|
||||
resyncTimer.Reset(resyncTimeInterval)
|
||||
now := UTCNow()
|
||||
p.resyncState.RLock()
|
||||
for bucket, brs := range p.resyncState.statusMap {
|
||||
var updt bool
|
||||
for _, st := range brs.TargetsMap {
|
||||
// if resync in progress or just ended, needs to save to disk
|
||||
if st.EndTime.Equal(timeSentinel) || now.Sub(st.EndTime) <= resyncTimeInterval {
|
||||
updt = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if updt {
|
||||
brs.LastUpdate = now
|
||||
if err := saveResyncStatus(ctx, bucket, brs, objectAPI); err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Could not save resync metadata to disk for %s - %w", bucket, err))
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
p.resyncState.RUnlock()
|
||||
case <-ctx.Done():
|
||||
// server could be restarting - need
|
||||
// to exit immediately
|
||||
return
|
||||
}
|
||||
}
|
||||
}
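periodicResyncMetaSave re-arms its timer every cycle and only persists buckets whose resync is ongoing or recently finished, so idle buckets incur no disk writes. A stripped-down sketch of that recurring flush loop; flushLoop and its save callback are stand-ins for the method and saveResyncStatus:

package main

import (
	"context"
	"fmt"
	"time"
)

// flushLoop invokes save on every interval tick until ctx is cancelled.
func flushLoop(ctx context.Context, interval time.Duration, save func(now time.Time)) {
	t := time.NewTimer(interval)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			t.Reset(interval) // keep the loop periodic
			save(time.Now().UTC())
		case <-ctx.Done():
			return // server may be restarting; exit immediately
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()
	flushLoop(ctx, 100*time.Millisecond, func(now time.Time) {
		fmt.Println("flush at", now.Format(time.RFC3339Nano))
	})
}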
|
||||
|
||||
// resyncBucket resyncs all qualifying objects as per replication rules for the target
|
||||
// ARN
|
||||
func resyncBucket(ctx context.Context, bucket, arn string, heal bool, objectAPI ObjectLayer) {
|
||||
resyncStatus := ResyncFailed
|
||||
defer func() {
|
||||
globalReplicationPool.resyncState.Lock()
|
||||
m := globalReplicationPool.resyncState.statusMap[bucket]
|
||||
st := m.TargetsMap[arn]
|
||||
st.EndTime = UTCNow()
|
||||
st.ResyncStatus = resyncStatus
|
||||
m.TargetsMap[arn] = st
|
||||
globalReplicationPool.resyncState.Unlock()
|
||||
}()
|
||||
// Allocate new results channel to receive ObjectInfo.
|
||||
objInfoCh := make(chan ObjectInfo)
|
||||
cfg, err := getReplicationConfig(ctx, bucket)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Replication resync of %s for arn %s failed with %w", bucket, arn, err))
|
||||
return
|
||||
}
|
||||
tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Replication resync of %s for arn %s failed %w", bucket, arn, err))
|
||||
return
|
||||
}
|
||||
rcfg := replicationConfig{
|
||||
Config: cfg,
|
||||
remotes: tgts,
|
||||
}
|
||||
tgtArns := cfg.FilterTargetArns(
|
||||
replication.ObjectOpts{
|
||||
OpType: replication.ResyncReplicationType,
|
||||
TargetArn: arn,
|
||||
})
|
||||
if len(tgtArns) != 1 {
|
||||
logger.LogIf(ctx, fmt.Errorf("Replication resync failed for %s - arn specified %s is missing in the replication config", bucket, arn))
|
||||
return
|
||||
}
|
||||
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, arn)
|
||||
if tgt == nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Replication resync failed for %s - target could not be created for arn %s", bucket, arn))
|
||||
return
|
||||
}
|
||||
|
||||
// Walk through all object versions - note ascending order of walk needed to ensure delete marker replicated to
|
||||
// target after object version is first created.
|
||||
if err := objectAPI.Walk(ctx, bucket, "", objInfoCh, ObjectOptions{WalkAscending: true}); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return
|
||||
}
|
||||
|
||||
globalReplicationPool.resyncState.RLock()
|
||||
m := globalReplicationPool.resyncState.statusMap[bucket]
|
||||
st := m.TargetsMap[arn]
|
||||
globalReplicationPool.resyncState.RUnlock()
|
||||
var lastCheckpoint string
|
||||
if st.ResyncStatus == ResyncStarted || st.ResyncStatus == ResyncFailed {
|
||||
lastCheckpoint = st.Object
|
||||
}
|
||||
for obj := range objInfoCh {
|
||||
if heal && lastCheckpoint != "" && lastCheckpoint != obj.Name {
|
||||
continue
|
||||
}
|
||||
lastCheckpoint = ""
|
||||
|
||||
roi := getHealReplicateObjectInfo(obj, rcfg)
|
||||
if !roi.ExistingObjResync.mustResync() {
|
||||
continue
|
||||
}
|
||||
|
||||
if roi.DeleteMarker || !roi.VersionPurgeStatus.Empty() {
|
||||
|
||||
versionID := ""
|
||||
dmVersionID := ""
|
||||
if roi.VersionPurgeStatus.Empty() {
|
||||
dmVersionID = roi.VersionID
|
||||
} else {
|
||||
versionID = roi.VersionID
|
||||
}
|
||||
|
||||
doi := DeletedObjectReplicationInfo{
|
||||
DeletedObject: DeletedObject{
|
||||
ObjectName: roi.Name,
|
||||
DeleteMarkerVersionID: dmVersionID,
|
||||
VersionID: versionID,
|
||||
ReplicationState: roi.getReplicationState(roi.Dsc.String(), versionID, true),
|
||||
DeleteMarkerMTime: DeleteMarkerMTime{roi.ModTime},
|
||||
DeleteMarker: roi.DeleteMarker,
|
||||
},
|
||||
Bucket: roi.Bucket,
|
||||
OpType: replication.ExistingObjectReplicationType,
|
||||
}
|
||||
replicateDelete(ctx, doi, objectAPI, ReplicateDelete)
|
||||
} else {
|
||||
roi.OpType = replication.ExistingObjectReplicationType
|
||||
replicateObject(ctx, roi, objectAPI, ReplicateExisting)
|
||||
}
|
||||
_, err = tgt.StatObject(ctx, tgt.Bucket, roi.Name, miniogo.StatObjectOptions{
|
||||
VersionID: roi.VersionID,
|
||||
Internal: miniogo.AdvancedGetOptions{
|
||||
ReplicationProxyRequest: "false",
|
||||
},
|
||||
})
|
||||
globalReplicationPool.resyncState.Lock()
|
||||
m = globalReplicationPool.resyncState.statusMap[bucket]
|
||||
st = m.TargetsMap[arn]
|
||||
st.Object = roi.Name
|
||||
if err != nil {
|
||||
if roi.DeleteMarker && isErrMethodNotAllowed(ErrorRespToObjectError(err, bucket, roi.Name)) {
|
||||
st.ReplicatedCount++
|
||||
} else {
|
||||
st.FailedCount++
|
||||
}
|
||||
} else {
|
||||
st.ReplicatedCount++
|
||||
st.ReplicatedSize += roi.Size
|
||||
}
|
||||
m.TargetsMap[arn] = st
|
||||
globalReplicationPool.resyncState.Unlock()
|
||||
}
|
||||
resyncStatus = ResyncCompleted
|
||||
}
|
||||
|
||||
// start replication resync for the remote target ARN specified
func startReplicationResync(ctx context.Context, bucket, arn, resyncID string, resyncBeforeDate time.Time, objAPI ObjectLayer) error {
	if bucket == "" {
		return fmt.Errorf("bucket name is empty")
	}
	if arn == "" {
		return fmt.Errorf("target ARN specified for resync is empty")
	}
	// Ensure the bucket actually has a replication configuration.
	cfg, err := getReplicationConfig(ctx, bucket)
	if err != nil {
		return err
	}
	tgtArns := cfg.FilterTargetArns(
		replication.ObjectOpts{
			OpType:    replication.ResyncReplicationType,
			TargetArn: arn,
		})

	if len(tgtArns) == 0 {
		return fmt.Errorf("arn %s specified for resync not found in replication config", arn)
	}

	data, err := loadBucketResyncMetadata(ctx, bucket, objAPI)
	if err != nil {
		return err
	}
	// validate if resync is in progress for this arn
	for tArn, st := range data.TargetsMap {
		if arn == tArn && st.ResyncStatus == ResyncStarted {
			return fmt.Errorf("Resync of bucket %s is already in progress for remote bucket %s", bucket, arn)
		}
	}

	status := TargetReplicationResyncStatus{
		ResyncID:         resyncID,
		ResyncBeforeDate: resyncBeforeDate,
		StartTime:        UTCNow(),
		ResyncStatus:     ResyncStarted,
		Bucket:           bucket,
	}
	data.TargetsMap[arn] = status
	if err = saveResyncStatus(ctx, bucket, data, objAPI); err != nil {
		return err
	}
	globalReplicationPool.resyncState.Lock()
	defer globalReplicationPool.resyncState.Unlock()
	brs, ok := globalReplicationPool.resyncState.statusMap[bucket]
	if !ok {
		brs = BucketReplicationResyncStatus{
			Version:    resyncMetaVersion,
			TargetsMap: make(map[string]TargetReplicationResyncStatus),
		}
	}
	brs.TargetsMap[arn] = status
	globalReplicationPool.resyncState.statusMap[bucket] = brs
	go resyncBucket(GlobalContext, bucket, arn, false, objAPI)
	return nil
}
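
The guard above is worth noting: a new resync is refused while a previous one for the same bucket/ARN pair is still marked ResyncStarted, and the new status is persisted via saveResyncStatus before the worker goroutine is spawned, so a crash can be detected and resumed on restart. A minimal standalone sketch of that check-then-start pattern (the tracker type and status strings here are illustrative, not MinIO APIs):

package main

import (
	"fmt"
	"sync"
)

type resyncTracker struct {
	mu     sync.Mutex
	status map[string]string // bucket+arn -> status
}

// start returns an error if a resync for this bucket/ARN pair is
// already in progress, mirroring the check in startReplicationResync.
func (t *resyncTracker) start(bucket, arn string) error {
	t.mu.Lock()
	defer t.mu.Unlock()
	key := bucket + "|" + arn
	if t.status[key] == "started" {
		return fmt.Errorf("resync of bucket %s already in progress for %s", bucket, arn)
	}
	// Record (and, in the real code, persist) the status before
	// spawning the background worker.
	t.status[key] = "started"
	go func() { /* resync worker would run here */ }()
	return nil
}

func main() {
	t := &resyncTracker{status: map[string]string{}}
	fmt.Println(t.start("photos", "arn1")) // <nil>
	fmt.Println(t.start("photos", "arn1")) // already in progress
}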

// delete resync metadata from replication resync state in memory
func (p *ReplicationPool) deleteResyncMetadata(ctx context.Context, bucket string) {
	if p == nil {
		return
	}
	p.resyncState.Lock()
	defer p.resyncState.Unlock()
	delete(p.resyncState.statusMap, bucket)
}

// initResync - initializes bucket replication resync for all buckets.
func (p *ReplicationPool) initResync(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
	if objAPI == nil {
		return errServerNotInitialized
	}
	// replication applies only to erasure coded setups
	if !globalIsErasure {
		return nil
	}
	// Load bucket resync metadata in the background
	go p.loadResync(ctx, buckets, objAPI)
	return nil
}

// Loads bucket replication resync statuses into memory.
func (p *ReplicationPool) loadResync(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) {
	for index := range buckets {
		meta, err := loadBucketResyncMetadata(ctx, buckets[index].Name, objAPI)
		if err != nil {
			if errors.Is(err, errVolumeNotFound) {
				meta = newBucketResyncStatus(buckets[index].Name)
			} else {
				logger.LogIf(ctx, err)
				continue
			}
		}
		p.resyncState.statusMap[buckets[index].Name] = meta
	}
	for index := range buckets {
		bucket := buckets[index].Name
		m, ok := p.resyncState.statusMap[bucket]
		if ok {
			for arn, st := range m.TargetsMap {
				if st.ResyncStatus == ResyncFailed || st.ResyncStatus == ResyncStarted {
					go resyncBucket(ctx, bucket, arn, true, objAPI)
				}
			}
		}
	}
}

// load bucket resync metadata from disk
func loadBucketResyncMetadata(ctx context.Context, bucket string, objAPI ObjectLayer) (brs BucketReplicationResyncStatus, e error) {
	brs = newBucketResyncStatus(bucket)

	resyncDirPath := path.Join(bucketMetaPrefix, bucket, replicationDir)
	data, err := readConfig(GlobalContext, objAPI, pathJoin(resyncDirPath, resyncFileName))
	if err != nil && err != errConfigNotFound {
		return brs, err
	}
	if len(data) == 0 {
		// Seems to be empty.
		return brs, nil
	}
	if len(data) <= 4 {
		return brs, fmt.Errorf("replication resync: no data")
	}
	// Read resync meta header
	switch binary.LittleEndian.Uint16(data[0:2]) {
	case resyncMetaFormat:
	default:
		return brs, fmt.Errorf("resyncMeta: unknown format: %d", binary.LittleEndian.Uint16(data[0:2]))
	}
	switch binary.LittleEndian.Uint16(data[2:4]) {
	case resyncMetaVersion:
	default:
		return brs, fmt.Errorf("resyncMeta: unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
	}
	// OK, parse data.
	if _, err = brs.UnmarshalMsg(data[4:]); err != nil {
		return brs, err
	}

	switch brs.Version {
	case resyncMetaVersionV1:
	default:
		return brs, fmt.Errorf("unexpected resync meta version: %d", brs.Version)
	}
	return brs, nil
}

// save resync status to resync.bin
func saveResyncStatus(ctx context.Context, bucket string, brs BucketReplicationResyncStatus, objectAPI ObjectLayer) error {
	data := make([]byte, 4, brs.Msgsize()+4)

	// Initialize the resync meta header.
	binary.LittleEndian.PutUint16(data[0:2], resyncMetaFormat)
	binary.LittleEndian.PutUint16(data[2:4], resyncMetaVersion)

	buf, err := brs.MarshalMsg(data)
	if err != nil {
		return err
	}

	configFile := path.Join(bucketMetaPrefix, bucket, replicationDir, resyncFileName)
	return saveConfig(ctx, objectAPI, configFile, buf)
}
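
For reference, resync.bin as written above carries a fixed 4-byte header: two little-endian uint16 fields (format, then version) followed by the msgp-encoded payload. A self-contained sketch of that layout and the validation order used by loadBucketResyncMetadata, with placeholder constants standing in for resyncMetaFormat/resyncMetaVersion:

package main

import (
	"encoding/binary"
	"fmt"
)

// Illustrative values; the real constants live in the MinIO source.
const (
	metaFormat  = 1
	metaVersion = 1
)

func main() {
	// Write: 2 bytes format + 2 bytes version, then the payload.
	data := make([]byte, 4, 64)
	binary.LittleEndian.PutUint16(data[0:2], metaFormat)
	binary.LittleEndian.PutUint16(data[2:4], metaVersion)
	data = append(data, []byte("msgp payload...")...)

	// Read: validate both header fields before decoding data[4:].
	if binary.LittleEndian.Uint16(data[0:2]) != metaFormat {
		fmt.Println("unknown format")
		return
	}
	if binary.LittleEndian.Uint16(data[2:4]) != metaVersion {
		fmt.Println("unknown version")
		return
	}
	fmt.Printf("payload: %q\n", data[4:])
}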

@@ -55,27 +55,28 @@ var replicationConfigTests = []struct {
 	tgtStatuses  map[string]replication.StatusType
 	expectedSync bool
 }{
-	{ //1. no replication config
+	{ // 1. no replication config
 		name:         "no replication config",
 		info:         ObjectInfo{Size: 100},
 		rcfg:         replicationConfig{Config: nil},
 		expectedSync: false,
 	},
-	{ //2. existing object replication config enabled, no versioning
+	{ // 2. existing object replication config enabled, no versioning
 		name:         "existing object replication config enabled, no versioning",
 		info:         ObjectInfo{Size: 100},
 		rcfg:         replicationConfig{Config: &configs[0]},
 		expectedSync: false,
 	},
-	{ //3. existing object replication config enabled, versioning suspended
+	{ // 3. existing object replication config enabled, versioning suspended
 		name:         "existing object replication config enabled, versioning suspended",
 		info:         ObjectInfo{Size: 100, VersionID: nullVersionID},
 		rcfg:         replicationConfig{Config: &configs[0]},
 		expectedSync: false,
 	},
-	{ //4. existing object replication enabled, versioning enabled; no reset in progress
+	{ // 4. existing object replication enabled, versioning enabled; no reset in progress
 		name: "existing object replication enabled, versioning enabled; no reset in progress",
-		info: ObjectInfo{Size: 100,
+		info: ObjectInfo{
+			Size:              100,
 			ReplicationStatus: replication.Completed,
 			VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
 		},
@@ -93,174 +94,192 @@ func TestReplicationResync(t *testing.T) {
 	}
 }

-var start = UTCNow().AddDate(0, 0, -1)
-var replicationConfigTests2 = []struct {
-	info         ObjectInfo
-	name         string
-	rcfg         replicationConfig
-	dsc          ReplicateDecision
-	tgtStatuses  map[string]replication.StatusType
-	expectedSync bool
-}{
-	{ // Cases 1-4: existing object replication enabled, versioning enabled, no reset - replication status varies
-		// 1: Pending replication
-		name: "existing object replication on object in Pending replication status",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatusInternal: "arn1:PENDING;",
-			ReplicationStatus:         replication.Pending,
-			VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
-		},
-		rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-			Arn: "arn1",
-		}}}},
-		dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-		expectedSync: true,
-	},
-
-	{ // 2. replication status Failed
-		name: "existing object replication on object in Failed replication status",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatusInternal: "arn1:FAILED",
-			ReplicationStatus:         replication.Failed,
-			VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
-		},
-		dsc:  ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-		rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-			Arn: "arn1",
-		}}}},
-		expectedSync: true,
-	},
-	{ //3. replication status unset
-		name: "existing object replication on pre-existing unreplicated object",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatus: replication.StatusType(""),
-			VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
-		},
-		rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-			Arn: "arn1",
-		}}}},
-		dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-		expectedSync: true,
-	},
-	{ //4. replication status Complete
-		name: "existing object replication on object in Completed replication status",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatusInternal: "arn1:COMPLETED",
-			ReplicationStatus:         replication.Completed,
-			VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
-		},
-		dsc:  ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", false, false)}},
-		rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-			Arn: "arn1",
-		}}}},
-		expectedSync: false,
-	},
-	{ //5. existing object replication enabled, versioning enabled, replication status Pending & reset ID present
-		name: "existing object replication with reset in progress and object in Pending status",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatusInternal: "arn1:PENDING;",
-			ReplicationStatus:         replication.Pending,
-			VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
-			UserDefined:               map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
-		},
-		expectedSync: true,
-		dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-		rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-			Arn:             "arn1",
-			ResetID:         "xyz",
-			ResetBeforeDate: UTCNow(),
-		}}},
-		},
-	},
-	{ //6. existing object replication enabled, versioning enabled, replication status Failed & reset ID present
-		name: "existing object replication with reset in progress and object in Failed status",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatusInternal: "arn1:FAILED;",
-			ReplicationStatus:         replication.Failed,
-			VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
-			UserDefined:               map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
-		},
-		dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-		rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-			Arn:             "arn1",
-			ResetID:         "xyz",
-			ResetBeforeDate: UTCNow(),
-		}}},
-		},
-		expectedSync: true,
-	},
-	{ //7. existing object replication enabled, versioning enabled, replication status unset & reset ID present
-		name: "existing object replication with reset in progress and object never replicated before",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatus: replication.StatusType(""),
-			VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
-			UserDefined:       map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
-		},
-		dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-		rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-			Arn:             "arn1",
-			ResetID:         "xyz",
-			ResetBeforeDate: UTCNow(),
-		}}},
-		},
-		expectedSync: true,
-	},
-	{ //8. existing object replication enabled, versioning enabled, replication status Complete & reset ID present
-		name: "existing object replication enabled - reset in progress for an object in Completed status",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatusInternal: "arn1:COMPLETED;",
-			ReplicationStatus:         replication.Completed,
-			VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df8",
-			UserDefined:               map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
-		},
-		expectedSync: true,
-		dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-		rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-			Arn:             "arn1",
-			ResetID:         "xyz",
-			ResetBeforeDate: UTCNow(),
-		}}},
-		},
-	},
-	{ //9. existing object replication enabled, versioning enabled, replication status Pending & reset ID different
-		name: "existing object replication enabled, newer reset in progress on object in Pending replication status",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatusInternal: "arn1:PENDING;",
-
-			ReplicationStatus: replication.Pending,
-			VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
-			UserDefined:       map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", UTCNow().AddDate(0, 0, -1).Format(http.TimeFormat), "abc")},
-			ModTime:           UTCNow().AddDate(0, 0, -2),
-		},
-		expectedSync: true,
-		dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-		rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-			Arn:             "arn1",
-			ResetID:         "xyz",
-			ResetBeforeDate: UTCNow(),
-		}}},
-		},
-	},
-	{ //10. existing object replication enabled, versioning enabled, replication status Complete & reset done
-		name: "reset done on object in Completed Status - ineligbile for re-replication",
-		info: ObjectInfo{Size: 100,
-			ReplicationStatusInternal: "arn1:COMPLETED;",
-			ReplicationStatus:         replication.Completed,
-			VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
-			UserDefined:               map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", start.Format(http.TimeFormat), "xyz")},
-		},
-		expectedSync: false,
-		dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-		rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-			Arn:             "arn1",
-			ResetID:         "xyz",
-			ResetBeforeDate: start,
-		}}},
-		},
-	},
-}
+var (
+	start                   = UTCNow().AddDate(0, 0, -1)
+	replicationConfigTests2 = []struct {
+		info         ObjectInfo
+		name         string
+		rcfg         replicationConfig
+		dsc          ReplicateDecision
+		tgtStatuses  map[string]replication.StatusType
+		expectedSync bool
+	}{
+		{ // Cases 1-4: existing object replication enabled, versioning enabled, no reset - replication status varies
+			// 1: Pending replication
+			name: "existing object replication on object in Pending replication status",
+			info: ObjectInfo{
+				Size:                      100,
+				ReplicationStatusInternal: "arn1:PENDING;",
+				ReplicationStatus:         replication.Pending,
+				VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
+			},
+			rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+				Arn: "arn1",
+			}}}},
+			dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			expectedSync: true,
+		},
+		{ // 2. replication status Failed
+			name: "existing object replication on object in Failed replication status",
+			info: ObjectInfo{
+				Size:                      100,
+				ReplicationStatusInternal: "arn1:FAILED",
+				ReplicationStatus:         replication.Failed,
+				VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
+			},
+			dsc:  ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+				Arn: "arn1",
+			}}}},
+			expectedSync: true,
+		},
+		{ // 3. replication status unset
+			name: "existing object replication on pre-existing unreplicated object",
+			info: ObjectInfo{
+				Size:              100,
+				ReplicationStatus: replication.StatusType(""),
+				VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
+			},
+			rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+				Arn: "arn1",
+			}}}},
+			dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			expectedSync: true,
+		},
+		{ // 4. replication status Complete
+			name: "existing object replication on object in Completed replication status",
+			info: ObjectInfo{
+				Size:                      100,
+				ReplicationStatusInternal: "arn1:COMPLETED",
+				ReplicationStatus:         replication.Completed,
+				VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
+			},
+			dsc:  ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", false, false)}},
+			rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+				Arn: "arn1",
+			}}}},
+			expectedSync: false,
+		},
+		{ // 5. existing object replication enabled, versioning enabled, replication status Pending & reset ID present
+			name: "existing object replication with reset in progress and object in Pending status",
+			info: ObjectInfo{
+				Size:                      100,
+				ReplicationStatusInternal: "arn1:PENDING;",
+				ReplicationStatus:         replication.Pending,
+				VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
+				UserDefined:               map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
+			},
+			expectedSync: true,
+			dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			rcfg: replicationConfig{
+				remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+					Arn:             "arn1",
+					ResetID:         "xyz",
+					ResetBeforeDate: UTCNow(),
+				}}},
+			},
+		},
+		{ // 6. existing object replication enabled, versioning enabled, replication status Failed & reset ID present
+			name: "existing object replication with reset in progress and object in Failed status",
+			info: ObjectInfo{
+				Size:                      100,
+				ReplicationStatusInternal: "arn1:FAILED;",
+				ReplicationStatus:         replication.Failed,
+				VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
+				UserDefined:               map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
+			},
+			dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			rcfg: replicationConfig{
+				remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+					Arn:             "arn1",
+					ResetID:         "xyz",
+					ResetBeforeDate: UTCNow(),
+				}}},
+			},
+			expectedSync: true,
+		},
+		{ // 7. existing object replication enabled, versioning enabled, replication status unset & reset ID present
+			name: "existing object replication with reset in progress and object never replicated before",
+			info: ObjectInfo{
+				Size:              100,
+				ReplicationStatus: replication.StatusType(""),
+				VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
+				UserDefined:       map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
+			},
+			dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			rcfg: replicationConfig{
+				remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+					Arn:             "arn1",
+					ResetID:         "xyz",
+					ResetBeforeDate: UTCNow(),
+				}}},
+			},
+			expectedSync: true,
+		},
+		{ // 8. existing object replication enabled, versioning enabled, replication status Complete & reset ID present
+			name: "existing object replication enabled - reset in progress for an object in Completed status",
+			info: ObjectInfo{
+				Size:                      100,
+				ReplicationStatusInternal: "arn1:COMPLETED;",
+				ReplicationStatus:         replication.Completed,
+				VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df8",
+				UserDefined:               map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
+			},
+			expectedSync: true,
+			dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			rcfg: replicationConfig{
+				remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+					Arn:             "arn1",
+					ResetID:         "xyz",
+					ResetBeforeDate: UTCNow(),
+				}}},
+			},
+		},
+		{ // 9. existing object replication enabled, versioning enabled, replication status Pending & reset ID different
+			name: "existing object replication enabled, newer reset in progress on object in Pending replication status",
+			info: ObjectInfo{
+				Size:                      100,
+				ReplicationStatusInternal: "arn1:PENDING;",
+
+				ReplicationStatus: replication.Pending,
+				VersionID:         "a3348c34-c352-4498-82f0-1098e8b34df9",
+				UserDefined:       map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", UTCNow().AddDate(0, 0, -1).Format(http.TimeFormat), "abc")},
+				ModTime:           UTCNow().AddDate(0, 0, -2),
+			},
+			expectedSync: true,
+			dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			rcfg: replicationConfig{
+				remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+					Arn:             "arn1",
+					ResetID:         "xyz",
+					ResetBeforeDate: UTCNow(),
+				}}},
+			},
+		},
+		{ // 10. existing object replication enabled, versioning enabled, replication status Complete & reset done
+			name: "reset done on object in Completed Status - ineligbile for re-replication",
+			info: ObjectInfo{
+				Size:                      100,
+				ReplicationStatusInternal: "arn1:COMPLETED;",
+				ReplicationStatus:         replication.Completed,
+				VersionID:                 "a3348c34-c352-4498-82f0-1098e8b34df9",
+				UserDefined:               map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", start.Format(http.TimeFormat), "xyz")},
+			},
+			expectedSync: false,
+			dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+			rcfg: replicationConfig{
+				remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+					Arn:             "arn1",
+					ResetID:         "xyz",
+					ResetBeforeDate: start,
+				}}},
+			},
+		},
+	}
+)

 func TestReplicationResyncwrapper(t *testing.T) {
 	for i, test := range replicationConfigTests2 {

@@ -18,11 +18,39 @@
 package cmd

 import (
 	"sync/atomic"
+	"time"
 )

 //go:generate msgp -file $GOFILE

+// ReplicationLatency holds information of bucket operations latency, such us uploads
+type ReplicationLatency struct {
+	// Single & Multipart PUTs latency
+	UploadHistogram LastMinuteLatencies
+}
+
+// Merge two replication latency into a new one
+func (rl ReplicationLatency) merge(other ReplicationLatency) (newReplLatency ReplicationLatency) {
+	newReplLatency.UploadHistogram = rl.UploadHistogram.Merge(other.UploadHistogram)
+	return
+}
+
+// Get upload latency of each object size range
+func (rl ReplicationLatency) getUploadLatency() (ret map[string]uint64) {
+	ret = make(map[string]uint64)
+	avg := rl.UploadHistogram.GetAvgData()
+	for k, v := range avg {
+		// Convert nanoseconds to milliseconds
+		ret[sizeTagToString(k)] = v.avg() / uint64(time.Millisecond)
+	}
+	return
+}
+
+// Update replication upload latency with a new value
+func (rl *ReplicationLatency) update(size int64, duration time.Duration) {
+	rl.UploadHistogram.Add(size, duration)
+}
+
 // BucketStats bucket statistics
 type BucketStats struct {
 	ReplicationStats BucketReplicationStats
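
A note on getUploadLatency above: the histogram hands back averages in nanoseconds, and the method integer-divides by time.Millisecond to report milliseconds per size bucket. A runnable sketch of just that conversion; the size-tag names below are made up for illustration, not the actual sizeTagToString output:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Pretend these are averaged upload latencies in nanoseconds.
	avgNanos := map[string]uint64{
		"LESS_THAN_1_MiB":  uint64(42 * time.Millisecond),
		"BETWEEN_1_10_MiB": uint64(350 * time.Millisecond),
	}
	// Integer division by time.Millisecond (1e6 ns) yields milliseconds.
	ms := make(map[string]uint64, len(avgNanos))
	for sizeTag, ns := range avgNanos {
		ms[sizeTag] = ns / uint64(time.Millisecond)
	}
	fmt.Println(ms) // map[BETWEEN_1_10_MiB:350 LESS_THAN_1_MiB:42]
}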
@@ -18,11 +18,39 @@ (continued)

 // Update replication upload latency with a new value
 func (rl *ReplicationLatency) update(size int64, duration time.Duration) {
 	rl.UploadHistogram.Add(size, duration)
 }

 // BucketStats bucket statistics
 type BucketStats struct {
 	ReplicationStats BucketReplicationStats
@@ -52,28 +80,18 @@ func (brs *BucketReplicationStats) Empty() bool {
 }

 // Clone creates a new BucketReplicationStats copy
-func (brs BucketReplicationStats) Clone() BucketReplicationStats {
-	c := BucketReplicationStats{
-		Stats: make(map[string]*BucketReplicationStat, len(brs.Stats)),
-	}
-	//this is called only by replicationStats cache and already holds a read lock before calling Clone()
+func (brs BucketReplicationStats) Clone() (c BucketReplicationStats) {
+	// This is called only by replicationStats cache and already holds a
+	// read lock before calling Clone()
+
+	c = brs
+	// We need to copy the map, so we do not reference the one in `brs`.
+	c.Stats = make(map[string]*BucketReplicationStat, len(brs.Stats))
 	for arn, st := range brs.Stats {
-		c.Stats[arn] = &BucketReplicationStat{
-			FailedSize:     atomic.LoadInt64(&st.FailedSize),
-			ReplicatedSize: atomic.LoadInt64(&st.ReplicatedSize),
-			ReplicaSize:    atomic.LoadInt64(&st.ReplicaSize),
-			FailedCount:    atomic.LoadInt64(&st.FailedCount),
-			PendingSize:    atomic.LoadInt64(&st.PendingSize),
-			PendingCount:   atomic.LoadInt64(&st.PendingCount),
-		}
+		// make a copy of `*st`
+		s := *st
+		c.Stats[arn] = &s
 	}
 	// update total counts across targets
 	c.FailedSize = atomic.LoadInt64(&brs.FailedSize)
 	c.FailedCount = atomic.LoadInt64(&brs.FailedCount)
 	c.PendingCount = atomic.LoadInt64(&brs.PendingCount)
 	c.PendingSize = atomic.LoadInt64(&brs.PendingSize)
 	c.ReplicaSize = atomic.LoadInt64(&brs.ReplicaSize)
 	c.ReplicatedSize = atomic.LoadInt64(&brs.ReplicatedSize)
 	return c
 }
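
The rewritten Clone above leans on `c = brs` for the scalar counters but still rebuilds the Stats map, because copying a struct copies only the map header, not the entries. A standalone sketch of the aliasing bug the fresh map avoids (the types here are simplified stand-ins, not the real stats structs):

package main

import "fmt"

type stat struct{ Failed int64 }

type stats struct {
	Totals int64
	PerArn map[string]*stat
}

func main() {
	orig := stats{Totals: 1, PerArn: map[string]*stat{"arn1": {Failed: 5}}}

	shallow := orig // copies Totals, but PerArn still points at the same map
	shallow.PerArn["arn1"].Failed = 99
	fmt.Println(orig.PerArn["arn1"].Failed) // 99 - the "copy" mutated the original

	// The Clone above avoids this by allocating a fresh map and copying
	// each entry by value, so the snapshot never aliases live counters.
	deep := orig
	deep.PerArn = make(map[string]*stat, len(orig.PerArn))
	for k, v := range orig.PerArn {
		s := *v
		deep.PerArn[k] = &s
	}
	deep.PerArn["arn1"].Failed = 0
	fmt.Println(orig.PerArn["arn1"].Failed) // still 99
}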

@@ -93,6 +111,8 @@ type BucketReplicationStat struct {
 	PendingCount int64 `json:"pendingReplicationCount"`
 	// Total number of failed operations including metadata updates
 	FailedCount int64 `json:"failedReplicationCount"`
+	// Replication latency information
+	Latency ReplicationLatency `json:"replicationLatency"`
 }

 func (bs *BucketReplicationStat) hasReplicationUsage() bool {

@@ -60,6 +60,35 @@ func (z *BucketReplicationStat) DecodeMsg(dc *msgp.Reader) (err error) {
 				err = msgp.WrapError(err, "FailedCount")
 				return
 			}
+		case "Latency":
+			var zb0002 uint32
+			zb0002, err = dc.ReadMapHeader()
+			if err != nil {
+				err = msgp.WrapError(err, "Latency")
+				return
+			}
+			for zb0002 > 0 {
+				zb0002--
+				field, err = dc.ReadMapKeyPtr()
+				if err != nil {
+					err = msgp.WrapError(err, "Latency")
+					return
+				}
+				switch msgp.UnsafeString(field) {
+				case "UploadHistogram":
+					err = z.Latency.UploadHistogram.DecodeMsg(dc)
+					if err != nil {
+						err = msgp.WrapError(err, "Latency", "UploadHistogram")
+						return
+					}
+				default:
+					err = dc.Skip()
+					if err != nil {
+						err = msgp.WrapError(err, "Latency")
+						return
+					}
+				}
+			}
 		default:
 			err = dc.Skip()
 			if err != nil {
@@ -73,9 +102,9 @@ func (z *BucketReplicationStat) DecodeMsg(dc *msgp.Reader) (err error) {

 // EncodeMsg implements msgp.Encodable
 func (z *BucketReplicationStat) EncodeMsg(en *msgp.Writer) (err error) {
-	// map header, size 6
+	// map header, size 7
 	// write "PendingSize"
-	err = en.Append(0x86, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
+	err = en.Append(0x87, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
 	if err != nil {
 		return
 	}
@@ -134,15 +163,31 @@ func (z *BucketReplicationStat) EncodeMsg(en *msgp.Writer) (err error) {
 		err = msgp.WrapError(err, "FailedCount")
 		return
 	}
+	// write "Latency"
+	err = en.Append(0xa7, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79)
+	if err != nil {
+		return
+	}
+	// map header, size 1
+	// write "UploadHistogram"
+	err = en.Append(0x81, 0xaf, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d)
+	if err != nil {
+		return
+	}
+	err = z.Latency.UploadHistogram.EncodeMsg(en)
+	if err != nil {
+		err = msgp.WrapError(err, "Latency", "UploadHistogram")
+		return
+	}
 	return
 }

 // MarshalMsg implements msgp.Marshaler
 func (z *BucketReplicationStat) MarshalMsg(b []byte) (o []byte, err error) {
 	o = msgp.Require(b, z.Msgsize())
-	// map header, size 6
+	// map header, size 7
 	// string "PendingSize"
-	o = append(o, 0x86, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
+	o = append(o, 0x87, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
 	o = msgp.AppendInt64(o, z.PendingSize)
 	// string "ReplicatedSize"
 	o = append(o, 0xae, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
@@ -159,6 +204,16 @@ func (z *BucketReplicationStat) MarshalMsg(b []byte) (o []byte, err error) {
 	// string "FailedCount"
 	o = append(o, 0xab, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74)
 	o = msgp.AppendInt64(o, z.FailedCount)
+	// string "Latency"
+	o = append(o, 0xa7, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79)
+	// map header, size 1
+	// string "UploadHistogram"
+	o = append(o, 0x81, 0xaf, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d)
+	o, err = z.Latency.UploadHistogram.MarshalMsg(o)
+	if err != nil {
+		err = msgp.WrapError(err, "Latency", "UploadHistogram")
+		return
+	}
 	return
 }
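
The 0x86-to-0x87 edits above are mechanical msgp regeneration: msgpack encodes a small map as a single "fixmap" byte 0x80|N, where N is the number of key/value pairs, so adding the Latency field bumps the map from 6 to 7 entries. Short keys use a "fixstr" byte 0xa0|len. A quick sketch verifying those bytes:

package main

import "fmt"

func main() {
	// fixmap: 0x80|N announces a map with N entries.
	fmt.Printf("fixmap(6) = %#x, fixmap(7) = %#x\n", 0x80|6, 0x80|7) // 0x86, 0x87

	// fixstr: 0xa0|len announces a short string; "PendingSize" is 11
	// characters, hence the 0xab prefix seen in the generated code.
	key := "PendingSize"
	fmt.Printf("fixstr prefix for %q = %#x\n", key, 0xa0|len(key)) // 0xab
}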

@@ -216,6 +271,35 @@ func (z *BucketReplicationStat) UnmarshalMsg(bts []byte) (o []byte, err error) {
 				err = msgp.WrapError(err, "FailedCount")
 				return
 			}
+		case "Latency":
+			var zb0002 uint32
+			zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "Latency")
+				return
+			}
+			for zb0002 > 0 {
+				zb0002--
+				field, bts, err = msgp.ReadMapKeyZC(bts)
+				if err != nil {
+					err = msgp.WrapError(err, "Latency")
+					return
+				}
+				switch msgp.UnsafeString(field) {
+				case "UploadHistogram":
+					bts, err = z.Latency.UploadHistogram.UnmarshalMsg(bts)
+					if err != nil {
+						err = msgp.WrapError(err, "Latency", "UploadHistogram")
+						return
+					}
+				default:
+					bts, err = msgp.Skip(bts)
+					if err != nil {
+						err = msgp.WrapError(err, "Latency")
+						return
+					}
+				}
+			}
 		default:
 			bts, err = msgp.Skip(bts)
 			if err != nil {
@@ -230,7 +314,7 @@ func (z *BucketReplicationStat) UnmarshalMsg(bts []byte) (o []byte, err error) {

 // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *BucketReplicationStat) Msgsize() (s int) {
-	s = 1 + 12 + msgp.Int64Size + 15 + msgp.Int64Size + 12 + msgp.Int64Size + 11 + msgp.Int64Size + 13 + msgp.Int64Size + 12 + msgp.Int64Size
+	s = 1 + 12 + msgp.Int64Size + 15 + msgp.Int64Size + 12 + msgp.Int64Size + 11 + msgp.Int64Size + 13 + msgp.Int64Size + 12 + msgp.Int64Size + 8 + 1 + 16 + z.Latency.UploadHistogram.Msgsize()
 	return
 }

@@ -707,3 +791,110 @@ func (z *BucketStats) Msgsize() (s int) {
 	s = 1 + 17 + z.ReplicationStats.Msgsize()
 	return
 }
+
+// DecodeMsg implements msgp.Decodable
+func (z *ReplicationLatency) DecodeMsg(dc *msgp.Reader) (err error) {
+	var field []byte
+	_ = field
+	var zb0001 uint32
+	zb0001, err = dc.ReadMapHeader()
+	if err != nil {
+		err = msgp.WrapError(err)
+		return
+	}
+	for zb0001 > 0 {
+		zb0001--
+		field, err = dc.ReadMapKeyPtr()
+		if err != nil {
+			err = msgp.WrapError(err)
+			return
+		}
+		switch msgp.UnsafeString(field) {
+		case "UploadHistogram":
+			err = z.UploadHistogram.DecodeMsg(dc)
+			if err != nil {
+				err = msgp.WrapError(err, "UploadHistogram")
+				return
+			}
+		default:
+			err = dc.Skip()
+			if err != nil {
+				err = msgp.WrapError(err)
+				return
+			}
+		}
+	}
+	return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *ReplicationLatency) EncodeMsg(en *msgp.Writer) (err error) {
+	// map header, size 1
+	// write "UploadHistogram"
+	err = en.Append(0x81, 0xaf, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d)
+	if err != nil {
+		return
+	}
+	err = z.UploadHistogram.EncodeMsg(en)
+	if err != nil {
+		err = msgp.WrapError(err, "UploadHistogram")
+		return
+	}
+	return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *ReplicationLatency) MarshalMsg(b []byte) (o []byte, err error) {
+	o = msgp.Require(b, z.Msgsize())
+	// map header, size 1
+	// string "UploadHistogram"
+	o = append(o, 0x81, 0xaf, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d)
+	o, err = z.UploadHistogram.MarshalMsg(o)
+	if err != nil {
+		err = msgp.WrapError(err, "UploadHistogram")
+		return
+	}
+	return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *ReplicationLatency) UnmarshalMsg(bts []byte) (o []byte, err error) {
+	var field []byte
+	_ = field
+	var zb0001 uint32
+	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
+	if err != nil {
+		err = msgp.WrapError(err)
+		return
+	}
+	for zb0001 > 0 {
+		zb0001--
+		field, bts, err = msgp.ReadMapKeyZC(bts)
+		if err != nil {
+			err = msgp.WrapError(err)
+			return
+		}
+		switch msgp.UnsafeString(field) {
+		case "UploadHistogram":
+			bts, err = z.UploadHistogram.UnmarshalMsg(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "UploadHistogram")
+				return
+			}
+		default:
+			bts, err = msgp.Skip(bts)
+			if err != nil {
+				err = msgp.WrapError(err)
+				return
+			}
+		}
+	}
+	o = bts
+	return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *ReplicationLatency) Msgsize() (s int) {
+	s = 1 + 16 + z.UploadHistogram.Msgsize()
+	return
+}
@@ -347,3 +347,116 @@ func BenchmarkDecodeBucketStats(b *testing.B) {
 		}
 	}
 }
+
+func TestMarshalUnmarshalReplicationLatency(t *testing.T) {
+	v := ReplicationLatency{}
+	bts, err := v.MarshalMsg(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	left, err := v.UnmarshalMsg(bts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(left) > 0 {
+		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+	}
+
+	left, err = msgp.Skip(bts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(left) > 0 {
+		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+	}
+}
+
+func BenchmarkMarshalMsgReplicationLatency(b *testing.B) {
+	v := ReplicationLatency{}
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		v.MarshalMsg(nil)
+	}
+}
+
+func BenchmarkAppendMsgReplicationLatency(b *testing.B) {
+	v := ReplicationLatency{}
+	bts := make([]byte, 0, v.Msgsize())
+	bts, _ = v.MarshalMsg(bts[0:0])
+	b.SetBytes(int64(len(bts)))
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		bts, _ = v.MarshalMsg(bts[0:0])
+	}
+}
+
+func BenchmarkUnmarshalReplicationLatency(b *testing.B) {
+	v := ReplicationLatency{}
+	bts, _ := v.MarshalMsg(nil)
+	b.ReportAllocs()
+	b.SetBytes(int64(len(bts)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := v.UnmarshalMsg(bts)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func TestEncodeDecodeReplicationLatency(t *testing.T) {
+	v := ReplicationLatency{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+
+	m := v.Msgsize()
+	if buf.Len() > m {
+		t.Log("WARNING: TestEncodeDecodeReplicationLatency Msgsize() is inaccurate")
+	}
+
+	vn := ReplicationLatency{}
+	err := msgp.Decode(&buf, &vn)
+	if err != nil {
+		t.Error(err)
+	}
+
+	buf.Reset()
+	msgp.Encode(&buf, &v)
+	err = msgp.NewReader(&buf).Skip()
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+func BenchmarkEncodeReplicationLatency(b *testing.B) {
+	v := ReplicationLatency{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+	b.SetBytes(int64(buf.Len()))
+	en := msgp.NewWriter(msgp.Nowhere)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		v.EncodeMsg(en)
+	}
+	en.Flush()
+}
+
+func BenchmarkDecodeReplicationLatency(b *testing.B) {
+	v := ReplicationLatency{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+	b.SetBytes(int64(buf.Len()))
+	rd := msgp.NewEndlessReader(buf.Bytes(), b)
+	dc := msgp.NewReader(rd)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		err := v.DecodeMsg(dc)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}

@@ -332,8 +332,10 @@ func (sys *BucketTargetSys) set(bucket BucketInfo, meta BucketMetadata) {
 }

 // getRemoteTargetInstanceTransport contains a singleton roundtripper.
-var getRemoteTargetInstanceTransport http.RoundTripper
-var getRemoteTargetInstanceTransportOnce sync.Once
+var (
+	getRemoteTargetInstanceTransport     http.RoundTripper
+	getRemoteTargetInstanceTransportOnce sync.Once
+)

 // Returns a minio-go Client configured to access remote host described in replication target config.
 func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*TargetClient, error) {
@@ -414,7 +416,7 @@ func parseBucketTargetConfig(bucket string, cdata, cmetadata []byte) (*madmin.Bu
 		return nil, nil
 	}
 	data = cdata
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	if len(cmetadata) != 0 {
 		if err := json.Unmarshal(cmetadata, &meta); err != nil {
 			return nil, err
@@ -444,6 +446,6 @@ type TargetClient struct {
 	StorageClass   string // storage class on remote
 	disableProxy   bool
 	healthCancelFn context.CancelFunc // cancellation function for client healthcheck
-	ARN            string             //ARN to uniquely identify remote target
+	ARN            string             // ARN to uniquely identify remote target
 	ResetID        string
 }

@@ -63,6 +63,15 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
 		return
 	}

+	if globalSiteReplicationSys.isEnabled() {
+		writeErrorResponse(ctx, w, APIError{
+			Code:           "InvalidBucketState",
+			Description:    "Cluster replication is enabled for this site, so the versioning state cannot be changed.",
+			HTTPStatusCode: http.StatusConflict,
+		}, r.URL)
+		return
+	}
+
 	if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled && v.Suspended() {
 		writeErrorResponse(ctx, w, APIError{
 			Code: "InvalidBucketState",
@@ -86,7 +95,7 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
 		return
 	}

-	if err = globalBucketMetadataSys.Update(bucket, bucketVersioningConfig, configData); err != nil {
+	if err = globalBucketMetadataSys.Update(ctx, bucket, bucketVersioningConfig, configData); err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
@@ -135,5 +144,4 @@ func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r

 	// Write bucket versioning configuration to client
 	writeSuccessResponseXML(w, configData)
-
 }

@@ -18,12 +18,15 @@
 package cmd

 import (
+	"bufio"
+	"bytes"
 	"context"
 	"crypto/tls"
 	"crypto/x509"
 	"encoding/gob"
 	"errors"
 	"fmt"
+	"io/ioutil"
 	"math/rand"
 	"net"
 	"net/http"
@@ -34,16 +37,20 @@ import (
 	"sort"
 	"strconv"
 	"strings"
+	"syscall"
 	"time"

+	"github.com/dustin/go-humanize"
 	fcolor "github.com/fatih/color"
 	"github.com/go-openapi/loads"
+	"github.com/inconshreveable/mousetrap"
 	dns2 "github.com/miekg/dns"
 	"github.com/minio/cli"
 	consoleCerts "github.com/minio/console/pkg/certs"
 	"github.com/minio/console/restapi"
 	"github.com/minio/console/restapi/operations"
 	"github.com/minio/kes"
 	"github.com/minio/madmin-go"
+	"github.com/minio/minio-go/v7"
 	"github.com/minio/minio-go/v7/pkg/credentials"
 	"github.com/minio/minio-go/v7/pkg/set"
@@ -63,9 +70,24 @@ import (

 // serverDebugLog will enable debug printing
 var serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn
-var defaultAWSCredProvider []credentials.Provider

+var (
+	shardDiskTimeDelta     time.Duration
+	defaultAWSCredProvider []credentials.Provider
+)
+
 func init() {
+	if runtime.GOOS == "windows" {
+		if mousetrap.StartedByExplorer() {
+			fmt.Printf("Don't double-click %s\n", os.Args[0])
+			fmt.Println("You need to open cmd.exe/PowerShell and run it from the command line")
+			fmt.Println("Refer to the docs here on how to run it as a Windows Service https://github.com/minio/minio-service/tree/master/windows")
+			fmt.Println("Press the Enter Key to Exit")
+			fmt.Scanln()
+			os.Exit(1)
+		}
+	}
+
 	rand.Seed(time.Now().UTC().UnixNano())

 	logger.Init(GOPATH, GOROOT)
@@ -112,6 +134,8 @@ func init() {
 	console.SetColor("Debug", fcolor.New())

 	gob.Register(StorageErr(""))
+	gob.Register(madmin.TimeInfo{})
+	gob.Register(map[string]interface{}{})

 	defaultAWSCredProvider = []credentials.Provider{
 		&credentials.IAM{
@@ -121,6 +145,12 @@ func init() {
 		},
 	}

+	var err error
+	shardDiskTimeDelta, err = time.ParseDuration(env.Get("_MINIO_SHARD_DISKTIME_DELTA", "1m"))
+	if err != nil {
+		shardDiskTimeDelta = 1 * time.Minute
+	}
+
 	// All minio-go API operations shall be performed only once,
 	// another way to look at this is we are turning off retries.
 	minio.MaxRetry = 1
@@ -162,7 +192,12 @@ func minioConfigToConsoleFeatures() {
 		os.Setenv("CONSOLE_IDP_HMAC_PASSPHRASE", globalOpenIDConfig.ClientID)
 		os.Setenv("CONSOLE_IDP_SCOPES", strings.Join(globalOpenIDConfig.DiscoveryDoc.ScopesSupported, ","))
 		if globalOpenIDConfig.ClaimUserinfo {
-			os.Setenv("CONSOLE_IDP_USERINFO", "on")
+			os.Setenv("CONSOLE_IDP_USERINFO", config.EnableOn)
 		}
+		if globalOpenIDConfig.RedirectURIDynamic {
+			// Enable dynamic redirect-uri's based on incoming 'host' header,
+			// Overrides any other callback URL.
+			os.Setenv("CONSOLE_IDP_CALLBACK_DYNAMIC", config.EnableOn)
+		}
 		if globalOpenIDConfig.RedirectURI != "" {
 			os.Setenv("CONSOLE_IDP_CALLBACK", globalOpenIDConfig.RedirectURI)
@@ -170,11 +205,17 @@ func minioConfigToConsoleFeatures() {
 			os.Setenv("CONSOLE_IDP_CALLBACK", getConsoleEndpoints()[0]+"/oauth_callback")
 		}
 	}
-	os.Setenv("CONSOLE_MINIO_REGION", globalServerRegion)
+	os.Setenv("CONSOLE_MINIO_REGION", globalSite.Region)
 	os.Setenv("CONSOLE_CERT_PASSWD", env.Get("MINIO_CERT_PASSWD", ""))
 	if globalSubnetConfig.License != "" {
 		os.Setenv("CONSOLE_SUBNET_LICENSE", globalSubnetConfig.License)
 	}
+	if globalSubnetConfig.APIKey != "" {
+		os.Setenv("CONSOLE_SUBNET_API_KEY", globalSubnetConfig.APIKey)
+	}
+	if globalSubnetConfig.Proxy != "" {
+		os.Setenv("CONSOLE_SUBNET_PROXY", globalSubnetConfig.Proxy)
+	}
 }

 func initConsoleServer() (*restapi.Server, error) {
@@ -286,7 +327,7 @@ func checkUpdate(mode string) {
 		return
 	}

-	logStartupMessage(prepareUpdateMessage("Run `mc admin update`", lrTime.Sub(crTime)))
+	logger.Info(prepareUpdateMessage("Run `mc admin update`", lrTime.Sub(crTime)))
 }

 func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() string) (*ConfigDir, bool) {
@@ -331,7 +372,6 @@ func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() s
 }

 func handleCommonCmdArgs(ctx *cli.Context) {
-
 	// Get "json" flag from command line argument and
 	// enable json and quite modes if json flag is turned on.
 	globalCLIContext.JSON = ctx.IsSet("json") || ctx.GlobalIsSet("json")
@@ -412,7 +452,166 @@ func handleCommonCmdArgs(ctx *cli.Context) {
 	logger.FatalIf(mkdirAllIgnorePerm(globalCertsCADir.Get()), "Unable to create certs CA directory at %s", globalCertsCADir.Get())
 }

+type envKV struct {
+	Key   string
+	Value string
+	Skip  bool
+}
+
+func (e envKV) String() string {
+	if e.Skip {
+		return ""
+	}
+	return fmt.Sprintf("%s=%s", e.Key, e.Value)
+}
+
+func parsEnvEntry(envEntry string) (envKV, error) {
+	envEntry = strings.TrimSpace(envEntry)
+	if envEntry == "" {
+		// Skip all empty lines
+		return envKV{
+			Skip: true,
+		}, nil
+	}
+	const envSeparator = "="
+	envTokens := strings.SplitN(strings.TrimSpace(strings.TrimPrefix(envEntry, "export")), envSeparator, 2)
+	if len(envTokens) != 2 {
+		return envKV{}, fmt.Errorf("envEntry malformed; %s, expected to be of form 'KEY=value'", envEntry)
+	}
+
+	key := envTokens[0]
+	val := envTokens[1]
+
+	// Remove quotes from the value if found
+	if len(val) >= 2 {
+		quote := val[0]
+		if (quote == '"' || quote == '\'') && val[len(val)-1] == quote {
+			val = val[1 : len(val)-1]
+		}
+	}
+
+	return envKV{
+		Key:   key,
+		Value: val,
+	}, nil
+}
+
+// Similar to os.Environ returns a copy of strings representing
+// the environment values from a file, in the form "key, value".
+// in a structured form.
+func minioEnvironFromFile(envConfigFile string) ([]envKV, error) {
+	f, err := os.Open(envConfigFile)
+	if err != nil {
+		if os.IsNotExist(err) { // ignore if file doesn't exist.
+			return nil, nil
+		}
+		return nil, err
+	}
+	defer f.Close()
+	var ekvs []envKV
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		ekv, err := parsEnvEntry(scanner.Text())
+		if err != nil {
+			return nil, err
+		}
+		if ekv.Skip {
+			// Skips empty lines
+			continue
+		}
+		ekvs = append(ekvs, ekv)
+	}
+	if err = scanner.Err(); err != nil {
+		return nil, err
+	}
+	return ekvs, nil
+}
+
+func readFromSecret(sp string) (string, error) {
+	// Supports reading path from docker secrets, filename is
+	// relative to /run/secrets/ position.
+	if isFile(pathJoin("/run/secrets/", sp)) {
+		sp = pathJoin("/run/secrets/", sp)
+	}
+	credBuf, err := ioutil.ReadFile(sp)
+	if err != nil {
+		if os.IsNotExist(err) { // ignore if file doesn't exist.
+			return "", nil
+		}
+		return "", err
+	}
+	return string(bytes.TrimSpace(credBuf)), nil
+}
+
+func loadEnvVarsFromFiles() {
+	if env.IsSet(config.EnvAccessKeyFile) {
+		accessKey, err := readFromSecret(env.Get(config.EnvAccessKeyFile, ""))
+		if err != nil {
+			logger.Fatal(config.ErrInvalidCredentials(err),
+				"Unable to validate credentials inherited from the secret file(s)")
+		}
+		if accessKey != "" {
+			os.Setenv(config.EnvRootUser, accessKey)
+		}
+	}
+
+	if env.IsSet(config.EnvSecretKeyFile) {
+		secretKey, err := readFromSecret(env.Get(config.EnvSecretKeyFile, ""))
+		if err != nil {
+			logger.Fatal(config.ErrInvalidCredentials(err),
+				"Unable to validate credentials inherited from the secret file(s)")
+		}
+		if secretKey != "" {
+			os.Setenv(config.EnvRootPassword, secretKey)
+		}
+	}
+
+	if env.IsSet(config.EnvRootUserFile) {
+		rootUser, err := readFromSecret(env.Get(config.EnvRootUserFile, ""))
+		if err != nil {
+			logger.Fatal(config.ErrInvalidCredentials(err),
+				"Unable to validate credentials inherited from the secret file(s)")
+		}
+		if rootUser != "" {
+			os.Setenv(config.EnvRootUser, rootUser)
+		}
+	}
+
+	if env.IsSet(config.EnvRootPasswordFile) {
+		rootPassword, err := readFromSecret(env.Get(config.EnvRootPasswordFile, ""))
+		if err != nil {
+			logger.Fatal(config.ErrInvalidCredentials(err),
+				"Unable to validate credentials inherited from the secret file(s)")
+		}
+		if rootPassword != "" {
+			os.Setenv(config.EnvRootPassword, rootPassword)
+		}
+	}
+
+	if env.IsSet(config.EnvKMSSecretKeyFile) {
+		kmsSecret, err := readFromSecret(env.Get(config.EnvKMSSecretKeyFile, ""))
+		if err != nil {
+			logger.Fatal(err, "Unable to read the KMS secret key inherited from secret file")
+		}
+		if kmsSecret != "" {
+			os.Setenv(config.EnvKMSSecretKey, kmsSecret)
+		}
+	}
+
+	if env.IsSet(config.EnvConfigEnvFile) {
+		ekvs, err := minioEnvironFromFile(env.Get(config.EnvConfigEnvFile, ""))
+		if err != nil {
+			logger.Fatal(err, "Unable to read the config environment file")
+		}
+		for _, ekv := range ekvs {
+			os.Setenv(ekv.Key, ekv.Value)
+		}
+	}
+}
+
 func handleCommonEnvVars() {
+	loadEnvVarsFromFiles()
+
 	var err error
 	globalBrowserEnabled, err = config.ParseBool(env.Get(config.EnvBrowser, config.EnableOn))
 	if err != nil {
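
The env-file loader added above accepts `KEY=value` lines, an optional `export ` prefix, matching single or double quotes around the value, and blank lines. A self-contained sketch of those parsing rules; parseLine here is a rewording of parsEnvEntry for illustration, not the function itself:

package main

import (
	"fmt"
	"strings"
)

// parseLine mirrors the rules in parsEnvEntry: blank lines are skipped,
// an optional "export" prefix is trimmed, the first '=' splits key from
// value, and matching quotes around the value are removed.
func parseLine(line string) (key, val string, skip bool, err error) {
	line = strings.TrimSpace(line)
	if line == "" {
		return "", "", true, nil
	}
	tokens := strings.SplitN(strings.TrimSpace(strings.TrimPrefix(line, "export")), "=", 2)
	if len(tokens) != 2 {
		return "", "", false, fmt.Errorf("malformed entry %q", line)
	}
	key, val = tokens[0], tokens[1]
	if len(val) >= 2 {
		if q := val[0]; (q == '"' || q == '\'') && val[len(val)-1] == q {
			val = val[1 : len(val)-1]
		}
	}
	return key, val, false, nil
}

func main() {
	for _, line := range []string{
		"MINIO_ROOT_USER=minio",
		`export MINIO_ROOT_PASSWORD="s3cr3t"`,
		"", // skipped
	} {
		k, v, skip, err := parseLine(line)
		fmt.Println(k, v, skip, err)
	}
}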
|
||||
@@ -457,6 +656,16 @@ func handleCommonEnvVars() {
|
||||
logger.Fatal(config.ErrInvalidFSOSyncValue(err), "Invalid MINIO_FS_OSYNC value in environment variable")
|
||||
}
|
||||
|
||||
if rootDiskSize := env.Get(config.EnvRootDiskThresholdSize, ""); rootDiskSize != "" {
|
||||
size, err := humanize.ParseBytes(rootDiskSize)
|
||||
if err != nil {
|
||||
logger.Fatal(err, fmt.Sprintf("Invalid %s value in environment variable", config.EnvRootDiskThresholdSize))
|
||||
}
|
||||
globalRootDiskThreshold = size
|
||||
}
|
||||
|
||||
globalIsCICD = env.Get("MINIO_CI_CD", "") != "" || env.Get("CI", "") != ""
|
||||
|
||||
domains := env.Get(config.EnvDomain, "")
|
||||
if len(domains) != 0 {
|
||||
for _, domainName := range strings.Split(domains, config.ValueSeparator) {
|
||||
@@ -479,11 +688,11 @@ func handleCommonEnvVars() {
|
||||
publicIPs := env.Get(config.EnvPublicIPs, "")
|
||||
if len(publicIPs) != 0 {
|
||||
minioEndpoints := strings.Split(publicIPs, config.ValueSeparator)
|
||||
var domainIPs = set.NewStringSet()
|
||||
domainIPs := set.NewStringSet()
|
||||
for _, endpoint := range minioEndpoints {
|
||||
if net.ParseIP(endpoint) == nil {
|
||||
// Checking if the IP is a DNS entry.
|
||||
addrs, err := net.LookupHost(endpoint)
|
||||
addrs, err := globalDNSCache.LookupHost(GlobalContext, endpoint)
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to initialize MinIO server with [%s] invalid entry found in MINIO_PUBLIC_IPS", endpoint)
|
||||
}
|
||||
@@ -509,11 +718,12 @@ func handleCommonEnvVars() {
|
||||
// in-place update is off.
|
||||
globalInplaceUpdateDisabled = strings.EqualFold(env.Get(config.EnvUpdate, config.EnableOn), config.EnableOff)
|
||||
|
||||
// Check if the supported credential env vars, "MINIO_ROOT_USER" and
|
||||
// "MINIO_ROOT_PASSWORD" are provided
|
||||
// Check if the supported credential env vars,
|
||||
// "MINIO_ROOT_USER" and "MINIO_ROOT_PASSWORD" are provided
|
||||
// Warn user if deprecated environment variables,
|
||||
// "MINIO_ACCESS_KEY" and "MINIO_SECRET_KEY", are defined
|
||||
// Check all error conditions first
|
||||
//nolint:gocritic
|
||||
if !env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) {
|
||||
logger.Fatal(config.ErrMissingEnvCredentialRootUser(nil), "Unable to start MinIO")
|
||||
} else if env.IsSet(config.EnvRootUser) && !env.IsSet(config.EnvRootPassword) {
|
||||
@@ -530,29 +740,29 @@ func handleCommonEnvVars() {
|
||||
// are defined or both are not defined.
|
||||
// Check both cases and authenticate them if correctly defined
|
||||
var user, password string
|
||||
haveRootCredentials := false
|
||||
haveAccessCredentials := false
|
||||
var hasCredentials bool
|
||||
//nolint:gocritic
|
||||
if env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) {
|
||||
user = env.Get(config.EnvRootUser, "")
|
||||
password = env.Get(config.EnvRootPassword, "")
|
||||
haveRootCredentials = true
|
||||
hasCredentials = true
|
||||
} else if env.IsSet(config.EnvAccessKey) && env.IsSet(config.EnvSecretKey) {
|
||||
user = env.Get(config.EnvAccessKey, "")
|
||||
password = env.Get(config.EnvSecretKey, "")
|
||||
haveAccessCredentials = true
|
||||
hasCredentials = true
|
||||
}
|
||||
if haveRootCredentials || haveAccessCredentials {
|
||||
if hasCredentials {
|
||||
cred, err := auth.CreateCredentials(user, password)
|
||||
if err != nil {
|
||||
logger.Fatal(config.ErrInvalidCredentials(err),
|
||||
"Unable to validate credentials inherited from the shell environment")
|
||||
}
|
||||
if haveAccessCredentials {
|
||||
if env.IsSet(config.EnvAccessKey) && env.IsSet(config.EnvSecretKey) {
|
||||
msg := fmt.Sprintf("WARNING: %s and %s are deprecated.\n"+
|
||||
" Please use %s and %s",
|
||||
config.EnvAccessKey, config.EnvSecretKey,
|
||||
config.EnvRootUser, config.EnvRootPassword)
|
||||
logStartupMessage(color.RedBold(msg))
|
||||
logger.Info(color.RedBold(msg))
|
||||
}
|
||||
globalActiveCred = cred
|
||||
}
|
||||
@@ -595,7 +805,7 @@ func handleCommonEnvVars() {
logger.Fatal(err, fmt.Sprintf("Unable to load X.509 root CAs for KES from %q", env.Get(config.EnvKESServerCA, globalCertsCADir.Get())))
}

var defaultKeyID = env.Get(config.EnvKESKeyName, "")
defaultKeyID := env.Get(config.EnvKESKeyName, "")
KMS, err := kms.NewWithConfig(kms.Config{
Endpoints: endpoints,
DefaultKeyID: defaultKeyID,
@@ -617,13 +827,6 @@ func handleCommonEnvVars() {
}
}

func logStartupMessage(msg string) {
if globalConsoleSys != nil {
globalConsoleSys.Send(msg, string(logger.All))
}
logger.StartupMessage(msg)
}

func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) {
if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
return nil, nil, false, nil
@@ -699,6 +902,13 @@ func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secu
}
}
secureConn = true

// Certs might be symlinks, reload them every 10 seconds.
manager.UpdateReloadDuration(10 * time.Second)

// syscall.SIGHUP to reload the certs.
manager.ReloadOnSignal(syscall.SIGHUP)

return x509Certs, manager, secureConn, nil
}
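
getTLSConfig now asks the certs.Manager to re-read certificates every 10 seconds (they may be symlinks, as with Kubernetes secret mounts) and on SIGHUP. The same hot-reload idea in a standard-library-only sketch, serving the most recently loaded certificate through tls.Config.GetCertificate (file paths are placeholders):

package main

import (
	"crypto/tls"
	"log"
	"os"
	"os/signal"
	"sync"
	"syscall"
)

type certStore struct {
	mu   sync.RWMutex
	cert *tls.Certificate
}

func (c *certStore) load(certFile, keyFile string) error {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return err
	}
	c.mu.Lock()
	c.cert = &cert
	c.mu.Unlock()
	return nil
}

func (c *certStore) get(*tls.ClientHelloInfo) (*tls.Certificate, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.cert, nil
}

func main() {
	store := &certStore{}
	if err := store.load("public.crt", "private.key"); err != nil {
		log.Fatal(err)
	}
	// Reload on SIGHUP, mirroring manager.ReloadOnSignal(syscall.SIGHUP).
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGHUP)
	go func() {
		for range ch {
			if err := store.load("public.crt", "private.key"); err != nil {
				log.Println("certificate reload failed:", err)
			}
		}
	}()
	// Hand the callback to a server via its TLS configuration.
	_ = &tls.Config{GetCertificate: store.get}
}
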
@@ -711,3 +921,32 @@ func contextCanceled(ctx context.Context) bool {
		return false
	}
}

// bgContext returns a context that can be used for async operations.
// Cancellation/timeouts are removed, so parent cancellations/timeout will
// not propagate from parent.
// Context values are preserved.
// This can be used for goroutines that live beyond the parent context.
func bgContext(parent context.Context) context.Context {
	return bgCtx{parent: parent}
}

type bgCtx struct {
	parent context.Context
}

func (a bgCtx) Done() <-chan struct{} {
	return nil
}

func (a bgCtx) Err() error {
	return nil
}

func (a bgCtx) Deadline() (deadline time.Time, ok bool) {
	return time.Time{}, false
}

func (a bgCtx) Value(key interface{}) interface{} {
	return a.parent.Value(key)
}
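
Taken together, bgCtx satisfies context.Context by reporting "never canceled" (nil Done channel, nil Err, no deadline) while delegating only Value to the parent. A small usage sketch building on the bgContext above (ctxKey and demo are illustrative names):

type ctxKey string

func demo() {
	parent, cancel := context.WithCancel(
		context.WithValue(context.Background(), ctxKey("reqID"), "1234"))
	detached := bgContext(parent)

	cancel() // the parent is canceled...

	// ...but the detached context is not: it never reports cancellation,
	// while request-scoped values remain visible.
	fmt.Println(detached.Err())                  // <nil>
	fmt.Println(detached.Value(ctxKey("reqID"))) // 1234
}

On Go 1.21 and later the standard library's context.WithoutCancel provides the same semantics.
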
161 cmd/common-main_test.go Normal file
@@ -0,0 +1,161 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"errors"
	"io/ioutil"
	"reflect"
	"testing"
)

func Test_readFromSecret(t *testing.T) {
	testCases := []struct {
		content       string
		expectedErr   bool
		expectedValue string
	}{
		{
			"value\n",
			false,
			"value",
		},
		{
			" \t\n Hello, Gophers \n\t\r\n",
			false,
			"Hello, Gophers",
		},
	}

	for _, testCase := range testCases {
		testCase := testCase
		t.Run("", func(t *testing.T) {
			tmpfile, err := ioutil.TempFile("", "testfile")
			if err != nil {
				t.Error(err)
			}
			tmpfile.WriteString(testCase.content)
			tmpfile.Sync()
			tmpfile.Close()

			value, err := readFromSecret(tmpfile.Name())
			if err != nil && !testCase.expectedErr {
				t.Error(err)
			}
			if err == nil && testCase.expectedErr {
				t.Error(errors.New("expected error, found success"))
			}
			if value != testCase.expectedValue {
				t.Errorf("Expected %s, got %s", testCase.expectedValue, value)
			}
		})
	}
}

func Test_minioEnvironFromFile(t *testing.T) {
	testCases := []struct {
		content      string
		expectedErr  bool
		expectedEkvs []envKV
	}{
		{
			`
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123`,
			false,
			[]envKV{
				{
					Key:   "MINIO_ROOT_USER",
					Value: "minio",
				},
				{
					Key:   "MINIO_ROOT_PASSWORD",
					Value: "minio123",
				},
			},
		},
		// Value with double quotes
		{
			`export MINIO_ROOT_USER="minio"`,
			false,
			[]envKV{
				{
					Key:   "MINIO_ROOT_USER",
					Value: "minio",
				},
			},
		},
		// Value with single quotes
		{
			`export MINIO_ROOT_USER='minio'`,
			false,
			[]envKV{
				{
					Key:   "MINIO_ROOT_USER",
					Value: "minio",
				},
			},
		},
		{
			`
MINIO_ROOT_USER=minio
MINIO_ROOT_PASSWORD=minio123`,
			false,
			[]envKV{
				{
					Key:   "MINIO_ROOT_USER",
					Value: "minio",
				},
				{
					Key:   "MINIO_ROOT_PASSWORD",
					Value: "minio123",
				},
			},
		},
		{
			`
export MINIO_ROOT_USERminio
export MINIO_ROOT_PASSWORD=minio123`,
			true,
			nil,
		},
	}
	for _, testCase := range testCases {
		testCase := testCase
		t.Run("", func(t *testing.T) {
			tmpfile, err := ioutil.TempFile("", "testfile")
			if err != nil {
				t.Error(err)
			}
			tmpfile.WriteString(testCase.content)
			tmpfile.Sync()
			tmpfile.Close()

			ekvs, err := minioEnvironFromFile(tmpfile.Name())
			if err != nil && !testCase.expectedErr {
				t.Error(err)
			}
			if err == nil && testCase.expectedErr {
				t.Error(errors.New("expected error, found success"))
			}
			if !reflect.DeepEqual(ekvs, testCase.expectedEkvs) {
				t.Errorf("expected %v, got %v", testCase.expectedEkvs, ekvs)
			}
		})
	}
}
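
The second test pins down the env-file syntax minioEnvironFromFile accepts: an optional "export " prefix, values optionally wrapped in single or double quotes, and an error for a line missing '='. A minimal line parser consistent with those cases, using Go 1.18+ strings.Cut (the real implementation may differ):

package cmd

import (
	"fmt"
	"strings"
)

// parseEnvLine is an illustrative helper, not the shipped parser.
func parseEnvLine(line string) (key, value string, err error) {
	line = strings.TrimPrefix(strings.TrimSpace(line), "export ")
	if line == "" {
		return "", "", nil // blank lines are skipped by the caller
	}
	k, v, found := strings.Cut(line, "=")
	if !found {
		return "", "", fmt.Errorf("expected KEY=VALUE, got %q", line)
	}
	return strings.TrimSpace(k), strings.Trim(v, `"'`), nil
}
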
@@ -29,27 +29,31 @@ import (

var errConfigNotFound = errors.New("config file not found")

func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]byte, error) {
// Read entire content by setting size to -1
r, err := objAPI.GetObjectNInfo(ctx, minioMetaBucket, configFile, nil, http.Header{}, readLock, ObjectOptions{})
func readConfigWithMetadata(ctx context.Context, store objectIO, configFile string) ([]byte, ObjectInfo, error) {
r, err := store.GetObjectNInfo(ctx, minioMetaBucket, configFile, nil, http.Header{}, readLock, ObjectOptions{})
if err != nil {
// Treat object not found as config not found.
if isErrObjectNotFound(err) {
return nil, errConfigNotFound
return nil, ObjectInfo{}, errConfigNotFound
}

return nil, err
return nil, ObjectInfo{}, err
}
defer r.Close()

buf, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
return nil, ObjectInfo{}, err
}
if len(buf) == 0 {
return nil, errConfigNotFound
return nil, ObjectInfo{}, errConfigNotFound
}
return buf, nil
return buf, r.ObjInfo, nil
}

func readConfig(ctx context.Context, store objectIO, configFile string) ([]byte, error) {
buf, _, err := readConfigWithMetadata(ctx, store, configFile)
return buf, err
}

type objectDeleter interface {
@@ -66,13 +70,13 @@ func deleteConfig(ctx context.Context, objAPI objectDeleter, configFile string)
return err
}

func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
func saveConfig(ctx context.Context, store objectIO, configFile string, data []byte) error {
hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
if err != nil {
return err
}

_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader), ObjectOptions{MaxParity: true})
_, err = store.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader), ObjectOptions{MaxParity: true})
return err
}
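
The objectIO type these new signatures rely on is not shown in this diff; presumably it is a narrow slice of ObjectLayer along the lines below, which is what lets config reads and writes be exercised against a small stub instead of a full object layer (the exact method set is an assumption):

// Assumed shape of objectIO; the real definition may differ.
type objectIO interface {
	GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec,
		h http.Header, lockType LockType, opts ObjectOptions) (*GetObjectReader, error)
	PutObject(ctx context.Context, bucket, object string, data *PutObjReader,
		opts ObjectOptions) (ObjectInfo, error)
}
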
@@ -44,13 +44,11 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/logger/target/http"
"github.com/minio/minio/internal/logger/target/kafka"
"github.com/minio/pkg/env"
)

func initHelp() {
var kvs = map[string]config.KVS{
kvs := map[string]config.KVS{
config.EtcdSubSys: etcd.DefaultKVS,
config.CacheSubSys: cache.DefaultKVS,
config.CompressionSubSys: compress.DefaultKVS,
@@ -58,10 +56,11 @@ func initHelp() {
config.IdentityOpenIDSubSys: openid.DefaultKVS,
config.IdentityTLSSubSys: xtls.DefaultKVS,
config.PolicyOPASubSys: opa.DefaultKVS,
config.SiteSubSys: config.DefaultSiteKVS,
config.RegionSubSys: config.DefaultRegionKVS,
config.APISubSys: api.DefaultKVS,
config.CredentialsSubSys: config.DefaultCredentialKVS,
config.LoggerWebhookSubSys: logger.DefaultKVS,
config.LoggerWebhookSubSys: logger.DefaultLoggerWebhookKVS,
config.AuditWebhookSubSys: logger.DefaultAuditWebhookKVS,
config.AuditKafkaSubSys: logger.DefaultAuditKafkaKVS,
config.HealSubSys: heal.DefaultKVS,
@@ -77,10 +76,10 @@ func initHelp() {
config.RegisterDefaultKVS(kvs)

// Captures help for each sub-system
var helpSubSys = config.HelpKVS{
helpSubSys := config.HelpKVS{
config.HelpKV{
Key: config.RegionSubSys,
Description: "label the location of the server",
Key: config.SiteSubSys,
Description: "label the server and its location",
},
config.HelpKV{
Key: config.CacheSubSys,
@@ -190,7 +189,7 @@ func initHelp() {
config.HelpKV{
Key: config.SubnetSubSys,
Type: "string",
Description: "set subnet config for the cluster e.g. license token",
Description: "set subnet config for the cluster e.g. api key",
Optional: true,
},
}
@@ -204,8 +203,9 @@ func initHelp() {
}
}

var helpMap = map[string]config.HelpKVS{
helpMap := map[string]config.HelpKVS{
"": helpSubSys, // Help for all sub-systems.
config.SiteSubSys: config.SiteHelp,
config.RegionSubSys: config.RegionHelp,
config.APISubSys: api.Help,
config.StorageClassSubSys: storageclass.Help,
@@ -231,10 +231,20 @@ func initHelp() {
config.NotifyRedisSubSys: notify.HelpRedis,
config.NotifyWebhookSubSys: notify.HelpWebhook,
config.NotifyESSubSys: notify.HelpES,
config.SubnetSubSys: subnet.HelpLicense,
config.SubnetSubSys: subnet.HelpSubnet,
}

config.RegisterHelpSubSys(helpMap)

// save top-level help for deprecated sub-systems in a separate map.
deprecatedHelpKVMap := map[string]config.HelpKV{
config.RegionSubSys: {
Key: config.RegionSubSys,
Description: "[DEPRECATED - use `site` instead] label the location of the server",
},
}

config.RegisterHelpDeprecatedSubSys(deprecatedHelpKVMap)
}

var (
@@ -243,65 +253,55 @@ var (
globalServerConfigMu sync.RWMutex
)

func validateConfig(s config.Config) error {
objAPI := newObjectLayerFn()

// We must have a global lock for this so nobody else modifies env while we do.
defer env.LockSetEnv()()

// Disable merging env values with config for validation.
env.SetEnvOff()

// Enable env values to validate KMS.
defer env.SetEnvOn()

if _, err := config.LookupCreds(s[config.CredentialsSubSys][config.Default]); err != nil {
return err
}

if _, err := config.LookupRegion(s[config.RegionSubSys][config.Default]); err != nil {
return err
}

if _, err := api.LookupConfig(s[config.APISubSys][config.Default]); err != nil {
return err
}

if globalIsErasure {
if objAPI == nil {
return errServerNotInitialized
func validateSubSysConfig(s config.Config, subSys string, objAPI ObjectLayer) error {
switch subSys {
case config.CredentialsSubSys:
if _, err := config.LookupCreds(s[config.CredentialsSubSys][config.Default]); err != nil {
return err
}
for _, setDriveCount := range objAPI.SetDriveCounts() {
if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount); err != nil {
return err
case config.SiteSubSys:
if _, err := config.LookupSite(s[config.SiteSubSys][config.Default], s[config.RegionSubSys][config.Default]); err != nil {
return err
}
case config.APISubSys:
if _, err := api.LookupConfig(s[config.APISubSys][config.Default]); err != nil {
return err
}
case config.StorageClassSubSys:
if globalIsErasure {
if objAPI == nil {
return errServerNotInitialized
}
for _, setDriveCount := range objAPI.SetDriveCounts() {
if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount); err != nil {
return err
}
}
}
}

if _, err := cache.LookupConfig(s[config.CacheSubSys][config.Default]); err != nil {
return err
}

compCfg, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default])
if err != nil {
return err
}

if objAPI != nil {
if compCfg.Enabled && !objAPI.IsCompressionSupported() {
return fmt.Errorf("Backend does not support compression")
case config.CacheSubSys:
if _, err := cache.LookupConfig(s[config.CacheSubSys][config.Default]); err != nil {
return err
}
case config.CompressionSubSys:
compCfg, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default])
if err != nil {
return err
}
}

if _, err = heal.LookupConfig(s[config.HealSubSys][config.Default]); err != nil {
return err
}

if _, err = scanner.LookupConfig(s[config.ScannerSubSys][config.Default]); err != nil {
return err
}

{
if objAPI != nil {
if compCfg.Enabled && !objAPI.IsCompressionSupported() {
return fmt.Errorf("Backend does not support compression")
}
}
case config.HealSubSys:
if _, err := heal.LookupConfig(s[config.HealSubSys][config.Default]); err != nil {
return err
}
case config.ScannerSubSys:
if _, err := scanner.LookupConfig(s[config.ScannerSubSys][config.Default]); err != nil {
return err
}
case config.EtcdSubSys:
etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
if err != nil {
return err
@@ -313,15 +313,13 @@ func validateConfig(s config.Config) error {
}
etcdClnt.Close()
}
}
if _, err := openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
return err
}

{
cfg, err := xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default],
globalRootCAs)
case config.IdentityOpenIDSubSys:
if _, err := openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
NewGatewayHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil {
return err
}
case config.IdentityLDAPSubSys:
cfg, err := xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default], globalRootCAs)
if err != nil {
return err
}
@@ -332,24 +330,58 @@ func validateConfig(s config.Config) error {
}
conn.Close()
}
case config.IdentityTLSSubSys:
if _, err := xtls.Lookup(s[config.IdentityTLSSubSys][config.Default]); err != nil {
return err
}
case config.SubnetSubSys:
if _, err := subnet.LookupConfig(s[config.SubnetSubSys][config.Default]); err != nil {
return err
}
case config.PolicyOPASubSys:
if _, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
return err
}
default:
if config.LoggerSubSystems.Contains(subSys) {
if err := logger.ValidateSubSysConfig(s, subSys); err != nil {
return err
}
}
}
{
_, err := xtls.Lookup(s[config.IdentityTLSSubSys][config.Default])
if err != nil {

if config.NotifySubSystems.Contains(subSys) {
if err := notify.TestSubSysNotificationTargets(GlobalContext, s, NewGatewayHTTPTransport(), globalNotificationSys.ConfiguredTargetIDs(), subSys); err != nil {
return err
}
}
return nil
}

func validateConfig(s config.Config, subSys string) error {
objAPI := newObjectLayerFn()

// We must have a global lock for this so nobody else modifies env while we do.
defer env.LockSetEnv()()

// Disable merging env values with config for validation.
env.SetEnvOff()

// Enable env values to validate KMS.
defer env.SetEnvOn()
if subSys != "" {
return validateSubSysConfig(s, subSys, objAPI)
}

// No sub-system passed. Validate all of them.
for _, ss := range config.SubSystems.ToSlice() {
if err := validateSubSysConfig(s, ss, objAPI); err != nil {
return err
}
}

if _, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
return err
}

if _, err := logger.LookupConfig(s); err != nil {
return err
}

return notify.TestNotificationTargets(GlobalContext, s, NewGatewayHTTPTransport(), globalNotificationSys.ConfiguredTargetIDs())
return nil
}
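
validateConfig now takes a sub-system argument: a specific constant validates just that sub-system, while the empty string keeps the old validate-everything behavior. Hypothetical call sites (cfg is assumed to be a config.Config in scope):

// Validate only the sub-system an admin update touched...
if err := validateConfig(cfg, config.APISubSys); err != nil {
	return err
}
// ...or every registered sub-system, as at startup.
if err := validateConfig(cfg, ""); err != nil {
	return err
}
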
func lookupConfigs(s config.Config, objAPI ObjectLayer) {
@@ -437,9 +469,9 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
// but not federation.
globalBucketFederation = etcdCfg.PathPrefix == "" && etcdCfg.Enabled

globalServerRegion, err = config.LookupRegion(s[config.RegionSubSys][config.Default])
globalSite, err = config.LookupSite(s[config.SiteSubSys][config.Default], s[config.RegionSubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Invalid region configuration: %w", err))
logger.LogIf(ctx, fmt.Errorf("Invalid site configuration: %w", err))
}

apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default])
@@ -496,8 +528,12 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize X.509/TLS STS API: %w", err))
}

if globalSTSTLSConfig.InsecureSkipVerify {
logger.LogIf(ctx, fmt.Errorf("CRITICAL: enabling %s is not recommended in a production environment", xtls.EnvIdentityTLSSkipVerify))
}

globalOpenIDConfig, err = openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
NewGatewayHTTPTransport(), xhttp.DrainBody)
NewGatewayHTTPTransport(), xhttp.DrainBody, globalSite.Region)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err))
}
@@ -522,48 +558,6 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
logger.LogIf(ctx, fmt.Errorf("Unable to parse subnet configuration: %w", err))
}

// Load logger targets based on user's configuration
loggerUserAgent := getUserAgent(getMinioMode())

loggerCfg, err := logger.LookupConfig(s)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize logger: %w", err))
}

for _, l := range loggerCfg.HTTP {
if l.Enabled {
l.LogOnce = logger.LogOnceIf
l.UserAgent = loggerUserAgent
l.Transport = NewGatewayHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)
// Enable http logging
if err = logger.AddTarget(http.New(l)); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize console HTTP target: %w", err))
}
}
}

for _, l := range loggerCfg.AuditWebhook {
if l.Enabled {
l.LogOnce = logger.LogOnceIf
l.UserAgent = loggerUserAgent
l.Transport = NewGatewayHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)
// Enable http audit logging
if err = logger.AddAuditTarget(http.New(l)); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize audit HTTP target: %w", err))
}
}
}

for _, l := range loggerCfg.AuditKafka {
if l.Enabled {
l.LogOnce = logger.LogOnceIf
// Enable Kafka audit logging
if err = logger.AddAuditTarget(kafka.New(l)); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize audit Kafka target: %w", err))
}
}
}

globalConfigTargetList, err = notify.GetNotificationTargets(GlobalContext, s, NewGatewayHTTPTransport(), false)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
@@ -584,65 +578,114 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
}
}

// applyDynamicConfig will apply dynamic config values.
// Dynamic systems should be in config.SubSystemsDynamic as well.
func applyDynamicConfig(ctx context.Context, objAPI ObjectLayer, s config.Config) error {
// Read all dynamic configs.
// API
apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
}
func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s config.Config, subSys string) error {
switch subSys {
case config.APISubSys:
apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
}
var setDriveCounts []int
if objAPI != nil {
setDriveCounts = objAPI.SetDriveCounts()
}
globalAPIConfig.init(apiConfig, setDriveCounts)
case config.CompressionSubSys:
cmpCfg, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to setup Compression: %w", err)
}
// Validate if the object layer supports compression.
if objAPI != nil {
if cmpCfg.Enabled && !objAPI.IsCompressionSupported() {
return fmt.Errorf("Backend does not support compression")
}
}
globalCompressConfigMu.Lock()
globalCompressConfig = cmpCfg
globalCompressConfigMu.Unlock()
case config.HealSubSys:
healCfg, err := heal.LookupConfig(s[config.HealSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to apply heal config: %w", err)
}
globalHealConfig.Update(healCfg)
case config.ScannerSubSys:
scannerCfg, err := scanner.LookupConfig(s[config.ScannerSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to apply scanner config: %w", err)
}
// update dynamic scanner values.
scannerCycle.Update(scannerCfg.Cycle)
logger.LogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))
case config.LoggerWebhookSubSys:
loggerCfg, err := logger.LookupConfigForSubSys(s, config.LoggerWebhookSubSys)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load logger webhook config: %w", err))
}
userAgent := getUserAgent(getMinioMode())
for n, l := range loggerCfg.HTTP {
if l.Enabled {
l.LogOnce = logger.LogOnceIf
l.UserAgent = userAgent
l.Transport = NewGatewayHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)
loggerCfg.HTTP[n] = l
}
}
err = logger.UpdateSystemTargets(loggerCfg)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update logger webhook config: %w", err))
}
case config.AuditWebhookSubSys:
loggerCfg, err := logger.LookupConfigForSubSys(s, config.AuditWebhookSubSys)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load audit webhook config: %w", err))
}
userAgent := getUserAgent(getMinioMode())
for n, l := range loggerCfg.AuditWebhook {
if l.Enabled {
l.LogOnce = logger.LogOnceIf
l.UserAgent = userAgent
l.Transport = NewGatewayHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)
loggerCfg.AuditWebhook[n] = l
}
}

// Compression
cmpCfg, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to setup Compression: %w", err)
}

// Validate if the object layer supports compression.
if objAPI != nil {
if cmpCfg.Enabled && !objAPI.IsCompressionSupported() {
return fmt.Errorf("Backend does not support compression")
err = logger.UpdateAuditWebhookTargets(loggerCfg)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update audit webhook targets: %w", err))
}
case config.AuditKafkaSubSys:
loggerCfg, err := logger.LookupConfigForSubSys(s, config.AuditKafkaSubSys)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load audit kafka config: %w", err))
}
for n, l := range loggerCfg.AuditKafka {
if l.Enabled {
l.LogOnce = logger.LogOnceIf
loggerCfg.AuditKafka[n] = l
}
}
err = logger.UpdateAuditKafkaTargets(loggerCfg)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update audit kafka targets: %w", err))
}
}

// Heal
healCfg, err := heal.LookupConfig(s[config.HealSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to apply heal config: %w", err)
}

// Scanner
scannerCfg, err := scanner.LookupConfig(s[config.ScannerSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to apply scanner config: %w", err)
}

// Apply configurations.
// We should not fail after this.
var setDriveCounts []int
if objAPI != nil {
setDriveCounts = objAPI.SetDriveCounts()
}
globalAPIConfig.init(apiConfig, setDriveCounts)

globalCompressConfigMu.Lock()
globalCompressConfig = cmpCfg
globalCompressConfigMu.Unlock()

globalHealConfig.Update(healCfg)

// update dynamic scanner values.
scannerCycle.Update(scannerCfg.Cycle)
logger.LogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))

// Update all dynamic config values in memory.
globalServerConfigMu.Lock()
defer globalServerConfigMu.Unlock()
if globalServerConfig != nil {
for k := range config.SubSystemsDynamic {
globalServerConfig[k] = s[k]
globalServerConfig[subSys] = s[subSys]
}
return nil
}

// applyDynamicConfig will apply dynamic config values.
// Dynamic systems should be in config.SubSystemsDynamic as well.
func applyDynamicConfig(ctx context.Context, objAPI ObjectLayer, s config.Config) error {
for subSys := range config.SubSystemsDynamic {
err := applyDynamicConfigForSubSys(ctx, objAPI, s, subSys)
if err != nil {
return err
}
}
return nil
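
With applyDynamicConfig reduced to a loop over config.SubSystemsDynamic, a runtime config change can be validated, persisted, and applied in place; only dynamic sub-systems take effect without a restart. A sketch of that flow using names from this diff (ctx, objAPI, cfg and subSys assumed in scope):

if err := validateConfig(cfg, subSys); err != nil {
	return err
}
if err := saveServerConfig(ctx, objAPI, cfg); err != nil {
	return err
}
// Sub-systems listed in config.SubSystemsDynamic take effect immediately.
if err := applyDynamicConfig(ctx, objAPI, cfg); err != nil {
	return err
}
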
@@ -670,7 +713,10 @@ func GetHelp(subSys, key string, envOnly bool) (Help, error) {

subSysHelp, ok := config.HelpSubSysMap[""].Lookup(subSys)
if !ok {
return Help{}, config.Errorf("unknown sub-system %s", subSys)
subSysHelp, ok = config.HelpDeprecatedSubSysMap[subSys]
if !ok {
return Help{}, config.Errorf("unknown sub-system %s", subSys)
}
}

h, ok := config.HelpSubSysMap[subSys]
@@ -692,9 +738,7 @@ func GetHelp(subSys, key string, envOnly bool) (Help, error) {
// to list the ENV, for regular k/v EnableKey is
// implicit, for ENVs we cannot make it implicit.
if subSysHelp.MultipleTargets {
envK := config.EnvPrefix + strings.Join([]string{
strings.ToTitle(subSys), strings.ToTitle(madmin.EnableKey),
}, config.EnvWordDelimiter)
envK := config.EnvPrefix + strings.ToTitle(subSys) + config.EnvWordDelimiter + strings.ToTitle(madmin.EnableKey)
envHelp = append(envHelp, config.HelpKV{
Key: envK,
Description: fmt.Sprintf("enable %s target, default is 'off'", subSys),
@@ -703,9 +747,7 @@ func GetHelp(subSys, key string, envOnly bool) (Help, error) {
})
}
for _, hkv := range h {
envK := config.EnvPrefix + strings.Join([]string{
strings.ToTitle(subSys), strings.ToTitle(hkv.Key),
}, config.EnvWordDelimiter)
envK := config.EnvPrefix + strings.ToTitle(subSys) + config.EnvWordDelimiter + strings.ToTitle(hkv.Key)
envHelp = append(envHelp, config.HelpKV{
Key: envK,
Description: hkv.Description,
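
Both GetHelp hunks replace a strings.Join over a two-element slice with plain concatenation; the derived name is unchanged. A worked example, assuming config.EnvPrefix is "MINIO_" and config.EnvWordDelimiter is "_":

package main

import (
	"fmt"
	"strings"
)

func main() {
	subSys, key := "logger_webhook", "endpoint"
	// Mirrors envK above: prefix + SUBSYS + delimiter + KEY, upper-cased.
	envK := "MINIO_" + strings.ToTitle(subSys) + "_" + strings.ToTitle(key)
	fmt.Println(envK) // MINIO_LOGGER_WEBHOOK_ENDPOINT
}
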
@@ -36,18 +36,21 @@ func TestServerConfig(t *testing.T) {
t.Fatalf("Init Test config failed")
}

if globalServerRegion != globalMinioDefaultRegion {
t.Errorf("Expecting region `us-east-1` found %s", globalServerRegion)
if globalSite.Region != globalMinioDefaultRegion {
t.Errorf("Expecting region `us-east-1` found %s", globalSite.Region)
}

// Set new region and verify.
config.SetRegion(globalServerConfig, "us-west-1")
region, err := config.LookupRegion(globalServerConfig[config.RegionSubSys][config.Default])
site, err := config.LookupSite(
globalServerConfig[config.SiteSubSys][config.Default],
globalServerConfig[config.RegionSubSys][config.Default],
)
if err != nil {
t.Fatal(err)
}
if region != "us-west-1" {
t.Errorf("Expecting region `us-west-1` found %s", globalServerRegion)
if site.Region != "us-west-1" {
t.Errorf("Expecting region `us-west-1` found %s", globalSite.Region)
}

if err := saveServerConfig(context.Background(), objLayer, globalServerConfig); err != nil {
@@ -84,7 +84,7 @@ func (dir *ConfigDir) Get() string {

// Attempts to create all directories, ignores any permission denied errors.
func mkdirAllIgnorePerm(path string) error {
err := os.MkdirAll(path, 0700)
err := os.MkdirAll(path, 0o700)
if err != nil {
// It is possible in kubernetes like deployments this directory
// is already mounted and is not writable, ignore any write errors.
@@ -74,7 +74,7 @@ func migrateIAMConfigsEtcdToEncrypted(ctx context.Context, client *etcd.Client)
if err != nil {
return err
}
logger.Info("Attempting to re-encrypt IAM users and policies on etcd with %q (%s)", stat.DefaultKey, stat.Name)
logger.Info(fmt.Sprintf("Attempting to re-encrypt IAM users and policies on etcd with %q (%s)", stat.DefaultKey, stat.Name))
}

listCtx, cancel := context.WithTimeout(ctx, 1*time.Minute)
@@ -143,7 +143,7 @@ func migrateConfigPrefixToEncrypted(objAPI ObjectLayer, encrypted bool) error {
if err != nil {
return err
}
logger.Info("Attempting to re-encrypt config, IAM users and policies on MinIO with %q (%s)", stat.DefaultKey, stat.Name)
logger.Info(fmt.Sprintf("Attempting to re-encrypt config, IAM users and policies on MinIO with %q (%s)", stat.DefaultKey, stat.Name))
}

var marker string
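
Both hunks pre-format the message with fmt.Sprintf instead of handing verbs and arguments to logger.Info, which suggests logger.Info here expects an already-formatted message; left unformatted, the %q and %s verbs would appear literally in the log. The pattern, assuming that signature:

// Format first, then log.
msg := fmt.Sprintf("Attempting to re-encrypt IAM users and policies on etcd with %q (%s)",
	stat.DefaultKey, stat.Name)
logger.Info(msg)
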
@@ -2445,12 +2445,12 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
return err
} // if errConfigNotFound proceed to migrate..

var configFiles = []string{
configFiles := []string{
getConfigFile(),
getConfigFile() + ".deprecated",
configFile,
}
var config = &serverConfigV27{}
config := &serverConfigV27{}
for _, cfgFile := range configFiles {
if _, err = Load(cfgFile, config); err != nil {
if !osIsNotExist(err) && !osIsPermission(err) {
@@ -2754,7 +2754,6 @@ func migrateMinioSysConfigToKV(objAPI ObjectLayer) error {
}

xldap.SetIdentityLDAP(newCfg, cfg.LDAPServerConfig)
openid.SetIdentityOpenID(newCfg, cfg.OpenID)
opa.SetPolicyOPAConfig(newCfg, cfg.Policy.OPA)
cache.SetCacheConfig(newCfg, cfg.Cache)
compress.SetCompressionConfig(newCfg, cfg.Compression)

@@ -51,7 +51,7 @@ func TestServerConfigMigrateV1(t *testing.T) {
// Create a V1 config json file and store it
configJSON := "{ \"version\":\"1\", \"accessKeyId\":\"abcde\", \"secretAccessKey\":\"abcdefgh\"}"
configPath := rootPath + "/fsUsers.json"
if err := ioutil.WriteFile(configPath, []byte(configJSON), 0644); err != nil {
if err := ioutil.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}

@@ -181,7 +181,7 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {
configPath := rootPath + SlashSeparator + minioConfigFile

// Create a corrupted config file
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0644); err != nil {
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}
// Fire a migrateConfig()
@@ -194,7 +194,7 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {

// Create a V2 config json file and store it
configJSON := "{ \"version\":\"2\", \"credentials\": {\"accessKeyId\":\"" + accessKey + "\", \"secretAccessKey\":\"" + secretKey + "\", \"region\":\"us-east-1\"}, \"mongoLogger\":{\"addr\":\"127.0.0.1:3543\", \"db\":\"foodb\", \"collection\":\"foo\"}, \"syslogLogger\":{\"network\":\"127.0.0.1:543\", \"addr\":\"addr\"}, \"fileLogger\":{\"filename\":\"log.out\"}}"
if err := ioutil.WriteFile(configPath, []byte(configJSON), 0644); err != nil {
if err := ioutil.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}

@@ -244,7 +244,7 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
configPath := rootPath + SlashSeparator + minioConfigFile

// Create a corrupted config file
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0644); err != nil {
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}

@@ -343,7 +343,7 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
for i := 3; i <= 17; i++ {
// Create a corrupted config file
if err = ioutil.WriteFile(configPath, []byte(fmt.Sprintf("{ \"version\":\"%d\", \"credential\": { \"accessKey\": 1 } }", i)),
0644); err != nil {
0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}

@@ -354,7 +354,7 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
}

// Create a corrupted config file for version '2'.
if err = ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"credentials\": { \"accessKeyId\": 1 } }"), 0644); err != nil {
if err = ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"credentials\": { \"accessKeyId\": 1 } }"), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}
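
The remaining hunks are mechanical rewrites of octal file-mode literals to Go's explicit 0o prefix (gofumpt style, supported since Go 1.13); the values are identical:

package main

import (
	"fmt"
	"os"
)

func main() {
	fmt.Println(0644 == 0o644)      // true: same number, different spelling
	fmt.Println(os.FileMode(0o644)) // -rw-r--r--
}
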
Some files were not shown because too many files have changed in this diff.