Compare commits

RELEASE.20...RELEASE.20

819 Commits
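The `RELEASE.20...RELEASE.20` range above is git's compare notation (both tag names are truncated in this capture). A minimal sketch of listing such a range locally, assuming hypothetical placeholder tags `BASE_TAG` and `HEAD_TAG` for the two releases:

```sh
# Commits reachable from HEAD_TAG but not from BASE_TAG,
# one abbreviated SHA1 per line (matches the SHA1 column below).
git log --oneline BASE_TAG..HEAD_TAG

# Count them; this comparison reports 819 commits.
git rev-list --count BASE_TAG..HEAD_TAG
```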
| Author | SHA1 | Date |
|---|---|---|
| | be97ae4c5d | |
| | 4d7d008741 | |
| | 2d7a3d1516 | |
| | dfab400d43 | |
| | 70078eab10 | |
| | 3415c4dd1e | |
| | e200808ab7 | |
| | fae563b85d | |
| | 3e6dc02f8f | |
| | 95e4cbbfde | |
| | 2825294b7b | |
| | bce93b5cfa | |
| | 7a4b250c8b | |
| | e5335450a4 | |
| | a6ffdf1dd4 | |
| | 69e41f87ef | |
| | ee48f9f206 | |
| | 9ba39d7fad | |
| | d2fb371f80 | |
| | 2f9018f03b | |
| | eb990f64a9 | |
| | bbb64eaade | |
| | 7bd1d899bc | |
| | c91d1ec2e3 | |
| | c50b64027d | |
| | 20960b6a2d | |
| | 3bd3470d0b | |
| | ba39ed9af7 | |
| | 62e6dc950d | |
| | 5a5046ce45 | |
| | ad04afe381 | |
| | ba9f0f2480 | |
| | d06b63d056 | |
| | 7ce28c3b1d | |
| | e3ac4035b9 | |
| | d21b6daa49 | |
| | 76ebb16688 | |
| | 55aa431578 | |
| | 614981e566 | |
| | b8b956a05d | |
| | d2eed44c78 | |
| | 789cbc6fb2 | |
| | 0662c90b5c | |
| | a2cab02554 | |
| | 6c7a21df6b | |
| | f933b0b708 | |
| | 9f305273a7 | |
| | cbd9efcb43 | |
| | 29a25a538f | |
| | 2dd8faaedc | |
| | f00187033d | |
| | c5141d65ac | |
| | 069c4015cd | |
| | 2f6e03fb60 | |
| | 0fbb945e13 | |
| | b94dd835c9 | |
| | 44fc707423 | |
| | 5aaef9790f | |
| | 7edc352d23 | |
| | 850a84b08a | |
| | 4148754ce0 | |
| | 2107722829 | |
| | d326ba52e9 | |
| | 91e1487de4 | |
| | 5ffb2a9605 | |
| | 17fe91d6d1 | |
| | 90a9f2dd70 | |
| | d5e48cfd65 | |
| | d274566463 | |
| | 39ac720826 | |
| | 21b6204692 | |
| | d98faeb26a | |
| | 0a63dc199c | |
| | 3ba857dfa1 | |
| | a8554c4022 | |
| | ba54b39c02 | |
| | 2a75225569 | |
| | e72429c79c | |
| | c5b3f5553f | |
| | d3ae0aaad3 | |
| | d67bccf861 | |
| | 1277ad69a6 | |
| | 8f93e81afb | |
| | 4af31e654b | |
| | aad50579ba | |
| | 38d059b0ae | |
| | bd4eeb4522 | |
| | 03e3493288 | |
| | 64baedf5a4 | |
| | 2f64d5f77e | |
| | f79a4ef4d0 | |
| | 2d53854b19 | |
| | e5c83535af | |
| | c904ef966e | |
| | 8f266e0772 | |
| | e0fe7cc391 | |
| | 9d20dec56a | |
| | 597a785253 | |
| | 7d75b1e758 | |
| | 5f78691fcf | |
| | a591e06ae5 | |
| | 443c93c634 | |
| | 5659cddc84 | |
| | 2a03a34bde | |
| | 1654a9b7e6 | |
| | 673a521711 | |
| | 2e23076688 | |
| | b92ac55250 | |
| | 7981509cc8 | |
| | 6d5bc045bc | |
| | d38e020b29 | |
| | 7d29030292 | |
| | 7c7650b7c3 | |
| | ca80eced24 | |
| | d0e0b81d8e | |
| | 391baa1c9a | |
| | ae14681c3e | |
| | 4d698841f4 | |
| | 9906b3ade9 | |
| | bf1769d3e0 | |
| | 63e1ad9f29 | |
| | 2c7bcee53f | |
| | 1fd90c93ff | |
| | e947a844c9 | |
| | 4e2d39293a | |
| | 1228d6bf1a | |
| | fc4561c64c | |
| | 3b7747b42b | |
| | e432e79324 | |
| | 08d74819b6 | |
| | aa3fde1784 | |
| | 0b3eb7f218 | |
| | 69c9496c71 | |
| | b792b36495 | |
| | d3db7d31a3 | |
| | c05ca63158 | |
| | 6d3e0c7db6 | |
| | 0e59e50b39 | |
| | d4b391de1b | |
| | de4d3dac00 | |
| | 534e7161df | |
| | 9b219cd646 | |
| | 3bab4822f3 | |
| | 3c5f2d8916 | |
| | 5808190398 | |
| | b2a82248b1 | |
| | 4e5fcca8b9 | |
| | c36eaedb93 | |
| | 7752b03add | |
| | 01bfc78535 | |
| | 074d70112d | |
| | e8d14c0d90 | |
| | 60d7e8143a | |
| | 9667a170de | |
| | abae30f9e1 | |
| | f9311bc9d1 | |
| | b598402738 | |
| | bd026b913f | |
| | 72ff69d9bb | |
| | f30417d9a8 | |
| | 47a4ad3cd7 | |
| | 2f7a10ab31 | |
| | b534dc69ab | |
| | 7b7d2ea7d4 | |
| | e00de1c302 | |
| | 3549e583a6 | |
| | f5e3eedf34 | |
| | 519dbfebf6 | |
| | 9a267f9270 | |
| | 67bd71b7a5 | |
| | ec49fff583 | |
| | 8b660e18f2 | |
| | 981497799a | |
| | b9bdc17465 | |
| | b413ff9fdb | |
| | 6a15580817 | |
| | 39633a5581 | |
| | 1e83f15e2f | |
| | 888d2bb1d8 | |
| | 847ee5ac45 | |
| | 9a9a49aa84 | |
| | a03ca80269 | |
| | 523bd769f1 | |
| | 8ff70ea5a9 | |
| | da3e7747ca | |
| | 4afb59e63f | |
| | 1526e7ece3 | |
| | 6c07bfee8a | |
| | 446c760820 | |
| | 04f92f1291 | |
| | 4a60a7794d | |
| | e5b16adb1c | |
| | 402a3ac719 | |
| | f3d61c51fc | |
| | 0cde17ae5d | |
| | 8c1bba681b | |
| | dbfb5e797b | |
| | 08ff702434 | |
| | 0e2148264a | |
| | a75f42344b | |
| | 7926401cbd | |
| | 8161411c5d | |
| | f64dea2aac | |
| | 6579304d8c | |
| | 6bb10a81a6 | |
| | 3cf8a7c888 | |
| | 2e38bb5175 | |
| | a372c6a377 | |
| | 93b2f8a0c5 | |
| | 1a6568a25d | |
| | 9e95703efc | |
| | d8e05aca81 | |
| | 410a1ac040 | |
| | 4caa3422bd | |
| | a658b976f5 | |
| | 135874ebdc | |
| | f4f1c42cba | |
| | e7aa26dc29 | |
| | c54ffde568 | |
| | 9a3c992d7a | |
| | 0c855638de | |
| | 943d815783 | |
| | 4c0acba62d | |
| | 62c3cdee75 | |
| | 3212d0c8cd | |
| | 1d03bea965 | |
| | fbfeb59658 | |
| | 701da1282a | |
| | df93ff92ba | |
| | 77d5331e85 | |
| | 14cdadfb56 | |
| | f3a52cc195 | |
| | 7640cd24c9 | |
| | f7b665347e | |
| | 9693c382a8 | |
| | ee1047bd52 | |
| | 5ea5ab162b | |
| | b5a09ff96b | |
| | 95c65f4e8f | |
| | 6bfff7532e | |
| | 1aa8896ad6 | |
| | 3e32ceb39f | |
| | ca1350b092 | |
| | 9205434ed3 | |
| | cd50e9b4bc | |
| | ec816f3840 | |
| | 5f774951b1 | |
| | 2ca9befd2a | |
| | 72f5cb577e | |
| | 928c0181bf | |
| | 03767d26da | |
| | 108e6f92d4 | |
| | d653a59fc0 | |
| | 01bfdf949a | |
| | 98f7821eb3 | |
| | 2d3898e0d5 | |
| | ae46ce9937 | |
| | dfc112c06b | |
| | ca5fab8656 | |
| | 6df76ca73c | |
| | f65dd3e5a2 | |
| | a8d601b64a | |
| | 73b4794cf7 | |
| | e2709ea129 | |
| | 740ec80819 | |
| | d95e054282 | |
| | 7c1f9667d1 | |
| | 9246990496 | |
| | 0cf3d93360 | |
| | cb06aee5ac | |
| | 1c70e9ed1b | |
| | f3d6a2dd37 | |
| | d1c58fc2eb | |
| | b8f05b1471 | |
| | e7baf78ee8 | |
| | 87299eba10 | |
| | d3a07c29ba | |
| | 8d39b715dc | |
| | 7e3166475d | |
| | 5206c0e883 | |
| | 41ec038523 | |
| | 08d3d06a06 | |
| | 074febd9e1 | |
| | aa8d25797b | |
| | 8d7d4adb91 | |
| | ffa91f9794 | |
| | 0c31e61343 | |
| | 9b926f7dbe | |
| | 35d8728990 | |
| | f7ed9a75ba | |
| | 9496c17e13 | |
| | ed64e91f06 | |
| | a481825ae1 | |
| | 7bb0f32332 | |
| | c6f8dc431e | |
| | 78f177b8ee | |
| | 787c44c39d | |
| | f06fee0364 | |
| | c957e0d426 | |
| | 04101d472f | |
| | 51fc145161 | |
| | 9d63bb1b41 | |
| | 8ff2a7a2b9 | |
| | 91f91d8f47 | |
| | a207bd6790 | |
| | 96d226c0b1 | |
| | a86d98826d | |
| | 1bb670ecba | |
| | c9e9a8e2b9 | |
| | 272367ccd2 | |
| | 95bf4a57b6 | |
| | 2228eb61cb | |
| | 5f07eb2d17 | |
| | d96d696841 | |
| | e18c0ab9bf | |
| | faeb2b7e79 | |
| | 97ce11cb6b | |
| | d7daae4762 | |
| | 3d86ae12bc | |
| | ba46ee5dfa | |
| | 912bbb2f1d | |
| | 4f660a8eb7 | |
| | ae4fb1b72e | |
| | b435806d91 | |
| | 06929258bc | |
| | cb577835d9 | |
| | 7f35f74f14 | |
| | 3d6194e93c | |
| | 72c7845f7e | |
| | 1c99597a06 | |
| | feb9d8480b | |
| | 4e670458b8 | |
| | 48deccdc40 | |
| | 2eee744e34 | |
| | 3f72439b8a | |
| | 468a9fae83 | |
| | d87f91720b | |
| | aa0eec16ab | |
| | d63e603040 | |
| | 8222a640ac | |
| | 7e45d84ace | |
| | 139a606f0a | |
| | 289223b6de | |
| | c61dd16a1e | |
| | 3e38fa54a5 | |
| | 4a02189ba0 | |
| | 3d4fc28ec9 | |
| | ec3a3bb10d | |
| | 364d3a0ac9 | |
| | cb536a73eb | |
| | 428155add9 | |
| | 8bce123bba | |
| | 0a56dbde2f | |
| | 7ff4164d65 | |
| | 53a14c7301 | |
| | dc45a5010d | |
| | 4b9192034c | |
| | deeadd1a37 | |
| | 1fc4203c19 | |
| | 15b930be1f | |
| | 7fd76dbbb7 | |
| | da81c6cc27 | |
| | a03dac41eb | |
| | b657ffa496 | |
| | 55778ae278 | |
| | d990661d1f | |
| | 280526caf7 | |
| | 1173b26fc8 | |
| | 383489d5d9 | |
| | 9370b11684 | |
| | 999bbd3a14 | |
| | 235edd88aa | |
| | b5e074e54c | |
| | 4d7068931a | |
| | d7fb6fddf6 | |
| | 7213bd7131 | |
| | d4aac7cd72 | |
| | 741de4cf94 | |
| | f168ef9989 | |
| | a0de56abb6 | |
| | c201d8bda9 | |
| | d2373d5d6c | |
| | 93fb7d62d8 | |
| | 485298b680 | |
| | 062f0cffad | |
| | ce1c640ce0 | |
| | 5c32058ff3 | |
| | 24b4f9d748 | |
| | 81d7531f1f | |
| | b4a23f720e | |
| | a2f6252b2f | |
| | 6c964fede5 | |
| | a25a8312d8 | |
| | b2c5b75efa | |
| | 2dfa9adc5d | |
| | 88a89213ff | |
| | 8e2238ea09 | |
| | 2007dd26ae | |
| | 31e8f7c525 | |
| | 51f62a8da3 | |
| | 650efc2e96 | |
| | 1787bcfc91 | |
| | 2cc4997d24 | |
| | 934f6cabf6 | |
| | 233cc3905a | |
| | 68dd74c5ab | |
| | 48b590e14b | |
| | 8ab3dac4f2 | |
| | 3fb0cbc030 | |
| | 837a2a3d4b | |
| | e91a4a414c | |
| | 74ccee6619 | |
| | c26b8d4eb8 | |
| | dae9dc4847 | |
| | 89f759566c | |
| | 5dc1ef0b87 | |
| | cd7551031b | |
| | df57bfcd6c | |
| | dfb1f39b57 | |
| | e3e3d92241 | |
| | 1b5f28e99b | |
| | b69bcdcdc4 | |
| | e385f54185 | |
| | 9a4d003ac7 | |
| | d5656eeb65 | |
| | 8edc67b0a9 | |
| | 18b0b7299a | |
| | 09b0e7133d | |
| | 6d08af61a0 | |
| | a7577da768 | |
| | 325fd80687 | |
| | 09626d78ff | |
| | 8f03c6e0db | |
| | 2c2f5d871c | |
| | c599c11e70 | |
| | ef06644799 | |
| | 6769d4dd54 | |
| | f3e7c42425 | |
| | f46bee242c | |
| | d7520f0ae6 | |
| | 44b70eb646 | |
| | 828d4df6f0 | |
| | 467714f33b | |
| | f8696cc8f6 | |
| | 9a7c7ab2d0 | |
| | 40fb3371fa | |
| | 51874a5776 | |
| | 62ce52c8fd | |
| | 2bdb9511bd | |
| | 9a012a53ef | |
| | 0aae0180fb | |
| | 1dd8ef09a6 | |
| | 95032e4710 | |
| | b1351e2dee | |
| | 30c2596512 | |
| | 2b5e4b853c | |
| | 85bcb5874a | |
| | 92788e4cf4 | |
| | 8a698fef71 | |
| | b49ce1713f | |
| | c2b54d92f6 | |
| | f965434022 | |
| | a3ac62596c | |
| | 2faba02d6b | |
| | ee158e1610 | |
| | fa68efb1e7 | |
| | 8c53a4405a | |
| | c32f699105 | |
| | 53aa8f5650 | |
| | 56887f3208 | |
| | 92180bc793 | |
| | 22aa16ab12 | |
| | 526b829a09 | |
| | c44f311c4f | |
| | 9ea5d08ecd | |
| | 35deb1a8e2 | |
| | c7f7c47388 | |
| | cb7dab17cb | |
| | cd419a35fe | |
| | e06168596f | |
| | 4c8197a119 | |
| | 23c10350f3 | |
| | b6e98aed01 | |
| | 00dcba9ddd | |
| | 607cafadbc | |
| | 68dde2359f | |
| | f9dbf41e27 | |
| | 7405760f44 | |
| | 7e4a6b4bcd | |
| | b5791e6f28 | |
| | 00cb58eaf3 | |
| | f961ec4aaf | |
| | 134db72bb7 | |
| | 6fd0b434e2 | |
| | effe21f3eb | |
| | 1118b285d3 | |
| | 912a0031b7 | |
| | a14e192376 | |
| | f8e15e7d09 | |
| | 7b9f9e0628 | |
| | ac8e9ce04f | |
| | cfd8645843 | |
| | 0c068b15c7 | |
| | 30a466aa71 | |
| | 4d94609c44 | |
| | 6b63123ca9 | |
| | 0cc9fb73e1 | |
| | eac4e4b279 | |
| | 6d381f7c0a | |
| | 4fa06aefc6 | |
| | 0e177a44e0 | |
| | afd19de5a9 | |
| | e3fbac9e24 | |
| | a9cf32811c | |
| | 53997ecc79 | |
| | 8e69f3cb89 | |
| | 997ba3a574 | |
| | 8e68ff9321 | |
| | 62761a23e6 | |
| | 404d8b3084 | |
| | 6005ad3d48 | |
| | 035a3ea4ae | |
| | 7ec43bd177 | |
| | a29c66ed74 | |
| | e104b183d8 | |
| | 7e082f232e | |
| | d28bf71f25 | |
| | 5b1a74b6b2 | |
| | eead4db1d2 | |
| | 980fb5e2ab | |
| | 9bcc46d93d | |
| | 22687c1f50 | |
| | ebc6c9b498 | |
| | 630963fa6b | |
| | f674168b8b | |
| | 7e023f2d50 | |
| | 27d02ea6f7 | |
| | 794a7993cb | |
| | 6f16d1cb2c | |
| | 7aa00bff89 | |
| | e046eb1d17 | |
| | ba975ca320 | |
| | fec13b0ec1 | |
| | 100c35c281 | |
| | 8414aff424 | |
| | f225ca3312 | |
| | 8b68e0bfdc | |
| | 6ae97aedc9 | |
| | 960d604013 | |
| | 63bf5f42a1 | |
| | ff80cfd83d | |
| | 99fde2ba85 | |
| | ce0cb913bc | |
| | d99d16e8c3 | |
| | 31743789dc | |
| | 6fd63e920a | |
| | 59cc3e93d6 | |
| | 61a4bb38cd | |
| | b192bc348c | |
| | 6440d0fbf3 | |
| | ee0055b929 | |
| | 24ecc44bac | |
| | 0ae4915a93 | |
| | 4cd777a5e0 | |
| | 65028d4a35 | |
| | caac9d216e | |
| | 057192913c | |
| | f25cbdf43c | |
| | 6da4a9c7bb | |
| | 80ca120088 | |
| | a669946357 | |
| | 7ffc162ea8 | |
| | bcfd7fbbcf | |
| | 486e2e48ea | |
| | 2ddf2ca934 | |
| | 403ec7cf21 | |
| | 29b1a29044 | |
| | b4ab8e095a | |
| | ff4f4d4649 | |
| | 9987ff570b | |
| | cff8235068 | |
| | 9ef132c33b | |
| | ff8269575a | |
| | 7743d952dc | |
| | 944f3c1477 | |
| | 1d3bd02089 | |
| | 38de8e6936 | |
| | 6347fb6636 | |
| | 32e668eb94 | |
| | c51f9ef940 | |
| | 1a91edecae | |
| | c88308cf0e | |
| | 88837fb753 | |
| | d0283ff354 | |
| | f449a7ae2c | |
| | a113b2c394 | |
| | 74851834c0 | |
| | e377bb949a | |
| | c905d3fe21 | |
| | b6e9d235fe | |
| | 6968f7237a | |
| | 4a6c97463f | |
| | 6c912ac960 | |
| | 708cebe7f0 | |
| | 152023e837 | |
| | 0f16e19239 | |
| | 2c38e44e48 | |
| | 82739574b5 | |
| | f78d677ab6 | |
| | e39e2306d6 | |
| | 52229a21cb | |
| | 961f7dea82 | |
| | feeeef71f1 | |
| | 65c4d550cb | |
| | f9b4a8d6e8 | |
| | e11d851aee | |
| | ac81f0248c | |
| | 83bf15a703 | |
| | cc960adbee | |
| | c66c5828ea | |
| | 19387cafab | |
| | 7c0673279b | |
| | 7ce0d71a96 | |
| | dd2542e96c | |
| | 21d60eab7c | |
| | 4d2320ba8b | |
| | a4a74e9844 | |
| | 9588978028 | |
| | 479940b7d0 | |
| | 8cd967803c | |
| | a0e1163fb6 | |
| | 8ccd1ee34a | |
| | ca258c04cb | |
| | 30bd5e2669 | |
| | 38637897ba | |
| | c727c8b684 | |
| | 993d96feef | |
| | b2b26d9c95 | |
| | cba3dd276b | |
| | a47fc75c26 | |
| | 42cfdf246f | |
| | e5c8794b8b | |
| | ac90a873eb | |
| | 5ce68ad7fd | |
| | 099e88516d | |
| | 82a6ad2c10 | |
| | c1a78224cf | |
| | 39f9350697 | |
| | e31081d79d | |
| | f02d282754 | |
| | a89e0bab7d | |
| | 3a90af0bcd | |
| | 53ceb0791f | |
| | 2cd98a0d21 | |
| | a0b10c05e5 | |
| | 04135fa6cd | |
| | 42dc6329e6 | |
| | 9b8ba97f9f | |
| | 7705605b5a | |
| | 414bcb0c73 | |
| | f4710948c4 | |
| | 3f4488c589 | |
| | 9434fff215 | |
| | 695962fae8 | |
| | 8f13c8c3bf | |
| | c1cae51fb5 | |
| | 31d16f6cc2 | |
| | a50ea92c64 | |
| | 5b2ced0119 | |
| | 8a0ba093dd | |
| | fbd8dfe60f | |
| | 60aff22931 | |
| | 5fc7da345d | |
| | fd2c38fbef | |
| | ba245c6c46 | |
| | 496027b589 | |
| | 8bd4f6568b | |
| | 9d7660b409 | |
| | da55499db0 | |
| | 22f8e39b58 | |
| | eba23bbac4 | |
| | 4550535cbb | |
| | 8432fd5ac2 | |
| | 7c948adf88 | |
| | 56b7045c20 | |
| | b1a109a611 | |
| | 7a311a3b66 | |
| | d55b6b9909 | |
| | f4389fb322 | |
| | 331208bec1 | |
| | 7680e5f81d | |
| | 6acf038a84 | |
| | bdf4e386cf | |
| | ad8a34858f | |
| | 162eced7d2 | |
| | bec1f7c26a | |
| | 8771617199 | |
| | 54bc995f0a | |
| | 6c89a81af4 | |
| | 8fa2898ff1 | |
| | 10ca0a6936 | |
| | b3314e97a6 | |
| | 3b9a948045 | |
| | 3781a0f9ad | |
| | e79b289325 | |
| | 6d4c1156d6 | |
| | 3f72c7fcc7 | |
| | d521c84d55 | |
| | 946b070744 | |
| | 4a21dce2b5 | |
| | 5fe7f9fa93 | |
| | 65f34cd823 | |
| | 196e7e072b | |
| | 6f97663174 | |
| | aed7a1818a | |
| | b50d90183e | |
| | 6b06da76cb | |
| | 6ca6788bb7 | |
| | 2e23e61a45 | |
| | 9cdf490bc5 | |
| | cfed671ea3 | |
| | 53ce92b9ca | |
| | 7350a29fec | |
| | 5cc2c62c66 | |
| | 4bc5ed6c76 | |
| | e99a597899 | |
| | 73dde66dbe | |
| | e30c0e7ca3 | |
| | 8fc200c0cc | |
| | 708296ae1b | |
| | fbb5e75e01 | |
| | f327b21557 | |
| | 45b7253f39 | |
| | 05bb655efc | |
| | 8fdfcfb562 | |
| | e7c144eeac | |
| | e98172d72d | |
| | f2d063e7b9 | |
| | a50f26b7f5 | |
| | 69294cf98a | |
| | c397fb6c7a | |
| | 961b0b524e | |
| | 860fc200b0 | |
| | 109a9e3f35 | |
| | 5f971fea6e | |
| | 0d7abe3b9f | |
| | 94fbcd8ebe | |
| | 879d5dd236 | |
| | 34187e047d | |
| | 0ee722f8c3 | |
| | b7d11141e1 | |
| | e9babf3dac | |
| | 0bb81f2e9c | |
| | bea0b050cd | |
| | ce62980d4e | |
| | dc88865908 | |
| | 9fbd931058 | |
| | b0264bdb90 | |
| | 95d6f43cc8 | |
| | 9cb94eb4a9 | |
| | bd0819330d | |
| | 8d9e83fd99 | |
| | be02333529 | |
| | 506f121576 | |
| | ca488cce87 | |
| | 11dc723324 | |
| | dd6ea18901 | |
| | 3369eeb920 | |
| | 9032f49f25 | |
| | fbc6f3f6e8 | |
| | fba883839d | |
| | a93214ea63 | |
| | e6b0fc465b | |
| | 70fbcfee4a | |
| | 0b074d0fae | |
| | d67e4d5b17 | |
| | 891c60d83d | |
| | fe3e49c4eb | |
| | 58306a9d34 | |
| | 41091d9472 | |
| | a4cfb5e1ed | |
| | 51aa59a737 | |
| | 8bedb419a9 | |
| | f56a182b71 | |
| | 317b40ef90 | |
| | e938ece492 | |
| | 02331a612c | |
| | 8317557f70 | |
| | 1bb7a2a295 | |
| | 215ca58d6a | |
| | 12f570a307 | |
| | e4b619ce1a | |
| | 0a286153bb | |
| | 22d59e757d | |
| | 0daa2dbf59 | |
| | 96c2304ae8 | |
| | 343dd2f491 | |
| | 38f35463b7 | |
| | 5573986e8e | |
| | f3367a1b20 | |
| | a3c2f7b0e8 | |
| | 8fbec30998 | |
| | a7466eeb0e | |
| | 8b1e819bf3 | |
| | fe63664164 | |
| | 4598827dcb | |
| | 9afdb05bf4 | |
| | 9569a85cee | |
| | 54721b7c7b | |
| | 91d8bddbd1 | |
| | 80adc87a14 | |
| | 117ad1b65b | |
| | 2229509362 | |
| | 6ef8e87492 | |
| | 0a25083fdb | |
| | 15137d0327 | |
| | 8c9974bc0f | |
| | 079b6c2b50 | |
| | 0924b34a17 | |
2 .github/PULL_REQUEST_TEMPLATE.md (vendored)

@@ -1,6 +1,6 @@
 ## Community Contribution License
 All community contributions in this pull request are licensed to the project maintainers
-under the terms of the [Apache 2 license] (https://www.apache.org/licenses/LICENSE-2.0).
+under the terms of the [Apache 2 license](https://www.apache.org/licenses/LICENSE-2.0).
 By creating this pull request I represent that I have the right to license the
 contributions to the project maintainers under the Apache 2 license.
4 .github/workflows/depsreview.yaml (vendored)

@@ -9,6 +9,6 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: 'Checkout Repository'
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: 'Dependency Review'
-        uses: actions/dependency-review-action@v1
+        uses: actions/dependency-review-action@v4
11 .github/workflows/go-cross.yml (vendored)

@@ -3,12 +3,11 @@ name: Crosscompile
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
 concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true

@@ -21,11 +20,11 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]
     steps:
-      - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3
-      - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
           go-version: ${{ matrix.go-version }}
           check-latest: true
9 .github/workflows/go-fips.yml (vendored)

@@ -3,8 +3,7 @@ name: FIPS Build Test
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.

@@ -21,11 +20,11 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
          go-version: ${{ matrix.go-version }}
9 .github/workflows/go-healing.yml (vendored)

@@ -3,8 +3,7 @@ name: Healing Functional Tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.

@@ -21,11 +20,11 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
           go-version: ${{ matrix.go-version }}
           check-latest: true
18 .github/workflows/go-lint.yml (vendored)

@@ -3,12 +3,11 @@ name: Linters and Tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
 concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true

@@ -21,23 +20,24 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
-        os: [ubuntu-latest, windows-latest]
+        go-version: [1.22.x]
+        os: [ubuntu-latest, Windows]
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
           go-version: ${{ matrix.go-version }}
           check-latest: true
       - name: Build on ${{ matrix.os }}
-        if: matrix.os == 'windows-latest'
+        if: matrix.os == 'Windows'
         env:
           CGO_ENABLED: 0
           GO111MODULE: on
         run: |
           Set-MpPreference -DisableRealtimeMonitoring $true
           netsh int ipv4 set dynamicport tcp start=60000 num=61000
           go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
-          go test -v --timeout 50m ./...
+          go test -v --timeout 120m ./...
       - name: Build on ${{ matrix.os }}
         if: matrix.os == 'ubuntu-latest'
         env:
9 .github/workflows/go.yml (vendored)

@@ -3,8 +3,7 @@ name: Functional Tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.

@@ -21,11 +20,11 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
           go-version: ${{ matrix.go-version }}
           check-latest: true
7 .github/workflows/helm-lint.yml (vendored)

@@ -3,8 +3,7 @@ name: Helm Chart linting
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.

@@ -20,10 +19,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Install Helm
-        uses: azure/setup-helm@v3
+        uses: azure/setup-helm@v4

       - name: Run helm lint
         run: |
15 .github/workflows/iam-integrations.yaml (vendored)

@@ -3,8 +3,7 @@ name: IAM integration
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.

@@ -62,7 +61,7 @@ jobs:
       # are turned off - i.e. if ldap="", then ldap server is not enabled for
       # the tests.
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         ldap: ["", "localhost:389"]
         etcd: ["", "http://localhost:2379"]
         openid: ["", "http://127.0.0.1:5556/dex"]

@@ -76,8 +75,8 @@ jobs:
             openid: "http://127.0.0.1:5556/dex"

     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
           go-version: ${{ matrix.go-version }}
           check-latest: true

@@ -112,6 +111,12 @@ jobs:
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           go run docs/iam/access-manager-plugin.go &
           make test-iam
+      - name: Test MinIO Old Version data to IAM import current version
+        if: matrix.ldap == 'ldaphost:389'
+        env:
+          _MINIO_LDAP_TEST_SERVER: ${{ matrix.ldap }}
+        run: |
+          make test-iam-ldap-upgrade-import
       - name: Test LDAP for automatic site replication
         if: matrix.ldap == 'localhost:389'
         run: |
18 .github/workflows/issues.yaml (vendored, new file)

@@ -0,0 +1,18 @@
+# @format
+
+name: Issue Workflow
+
+on:
+  issues:
+    types:
+      - opened
+
+jobs:
+  add-to-project:
+    name: Add issue to project
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/add-to-project@v0.5.0
+        with:
+          project-url: https://github.com/orgs/miniohq/projects/2
+          github-token: ${{ secrets.BOT_PAT }}
12 .github/workflows/mint.yml (vendored)

@@ -4,7 +4,6 @@ on:
   pull_request:
     branches:
       - master
-      - next

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.

@@ -25,12 +24,12 @@ jobs:
           sudo -S rm -rf ${GITHUB_WORKSPACE}
           mkdir ${GITHUB_WORKSPACE}
       - name: checkout-step
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: setup-go-step
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
-          go-version: 1.21.x
+          go-version: 1.22.x

       - name: github sha short
         id: vars

@@ -56,6 +55,11 @@ jobs:
         run: |
           ${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "erasure" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"

+      # FIXME: renable this back when we have a valid way to add deadlines for PUT()s (internode CreateFile)
+      # - name: resiliency
+      #   run: |
+      #     ${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "resiliency" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
+
       - name: The job must cleanup
         if: ${{ always() }}
         run: |
78 .github/workflows/mint/minio-resiliency.yaml (vendored, new file)

@@ -0,0 +1,78 @@
+version: '3.7'
+
+# Settings and configurations that are common for all containers
+x-minio-common: &minio-common
+  image: quay.io/minio/minio:${JOB_NAME}
+  command: server --console-address ":9001" http://minio{1...4}/rdata{1...2}
+  expose:
+    - "9000"
+    - "9001"
+  environment:
+    MINIO_CI_CD: "on"
+    MINIO_ROOT_USER: "minio"
+    MINIO_ROOT_PASSWORD: "minio123"
+    MINIO_KMS_SECRET_KEY: "my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw="
+    MINIO_DRIVE_MAX_TIMEOUT: "5s"
+  healthcheck:
+    test: ["CMD", "mc", "ready", "local"]
+    interval: 5s
+    timeout: 5s
+    retries: 5
+
+# starts 4 docker containers running minio server instances.
+# using nginx reverse proxy, load balancing, you can access
+# it through port 9000.
+services:
+  minio1:
+    <<: *minio-common
+    hostname: minio1
+    volumes:
+      - rdata1-1:/rdata1
+      - rdata1-2:/rdata2
+
+  minio2:
+    <<: *minio-common
+    hostname: minio2
+    volumes:
+      - rdata2-1:/rdata1
+      - rdata2-2:/rdata2
+
+  minio3:
+    <<: *minio-common
+    hostname: minio3
+    volumes:
+      - rdata3-1:/rdata1
+      - rdata3-2:/rdata2
+
+  minio4:
+    <<: *minio-common
+    hostname: minio4
+    volumes:
+      - rdata4-1:/rdata1
+      - rdata4-2:/rdata2
+
+  nginx:
+    image: nginx:1.19.2-alpine
+    hostname: nginx
+    volumes:
+      - ./nginx-4-node.conf:/etc/nginx/nginx.conf:ro
+    ports:
+      - "9000:9000"
+      - "9001:9001"
+    depends_on:
+      - minio1
+      - minio2
+      - minio3
+      - minio4
+
+## By default this config uses default local driver,
+## For custom volumes replace with volume driver configuration.
+volumes:
+  rdata1-1:
+  rdata1-2:
+  rdata2-1:
+  rdata2-2:
+  rdata3-1:
+  rdata3-2:
+  rdata4-1:
+  rdata4-2:
7 .github/workflows/mint/nginx-4-node.conf (vendored)

@@ -23,10 +23,9 @@ http {
     # include /etc/nginx/conf.d/*.conf;

     upstream minio {
-        server minio1:9000;
-        server minio2:9000;
-        server minio3:9000;
-        server minio4:9000;
+        server minio1:9000 max_fails=1 fail_timeout=10s;
+        server minio2:9000 max_fails=1 fail_timeout=10s;
+        server minio3:9000 max_fails=1 fail_timeout=10s;
     }

     upstream console {
16 .github/workflows/mint/nginx-8-node.conf (vendored)

@@ -23,14 +23,14 @@ http {
     # include /etc/nginx/conf.d/*.conf;

     upstream minio {
-        server minio1:9000;
-        server minio2:9000;
-        server minio3:9000;
-        server minio4:9000;
-        server minio5:9000;
-        server minio6:9000;
-        server minio7:9000;
-        server minio8:9000;
+        server minio1:9000 max_fails=1 fail_timeout=10s;
+        server minio2:9000 max_fails=1 fail_timeout=10s;
+        server minio3:9000 max_fails=1 fail_timeout=10s;
+        server minio4:9000 max_fails=1 fail_timeout=10s;
+        server minio5:9000 max_fails=1 fail_timeout=10s;
+        server minio6:9000 max_fails=1 fail_timeout=10s;
+        server minio7:9000 max_fails=1 fail_timeout=10s;
+        server minio8:9000 max_fails=1 fail_timeout=10s;
     }

     upstream console {
8 .github/workflows/mint/nginx.conf (vendored)

@@ -23,10 +23,10 @@ http {
     # include /etc/nginx/conf.d/*.conf;

     upstream minio {
-        server minio1:9000;
-        server minio2:9000;
-        server minio3:9000;
-        server minio4:9000;
+        server minio1:9000 max_fails=1 fail_timeout=10s;
+        server minio2:9000 max_fails=1 fail_timeout=10s;
+        server minio3:9000 max_fails=1 fail_timeout=10s;
+        server minio4:9000 max_fails=1 fail_timeout=10s;
     }

     upstream console {
80 .github/workflows/multipart/migrate.sh (vendored)

@@ -5,22 +5,25 @@ set -x
 ## change working directory
 cd .github/workflows/multipart/

-docker-compose -f docker-compose-site1.yaml rm -s -f
-docker-compose -f docker-compose-site2.yaml rm -s -f
-for volume in $(docker volume ls -q | grep minio); do
-	docker volume rm ${volume}
-done
+function cleanup() {
+	docker-compose -f docker-compose-site1.yaml rm -s -f || true
+	docker-compose -f docker-compose-site2.yaml rm -s -f || true
+	for volume in $(docker volume ls -q | grep minio); do
+		docker volume rm ${volume} || true
+	done
+
+	docker system prune -f || true
+	docker volume prune -f || true
+	docker volume rm $(docker volume ls -q -f dangling=true) || true
+}
+
+cleanup

 if [ ! -f ./mc ]; then
 	wget --quiet -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
 		chmod +x mc
 fi

 (
 	cd /tmp
 	go install github.com/minio/minio/docs/debugging/s3-check-md5@latest
 )

 export RELEASE=RELEASE.2023-08-29T23-07-35Z

 docker-compose -f docker-compose-site1.yaml up -d

@@ -40,10 +43,10 @@ sleep 30s

 sleep 5

-s3-check-md5 -h
+./s3-check-md5 -h

-failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
-failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)

 if [ $failed_count_site1 -ne 0 ]; then
 	echo "failed with multipart on site1 uploads"

@@ -59,8 +62,8 @@ fi

 sleep 5

-failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
-failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)

 ## we do not need to fail here, since we are going to test
 ## upgrading to master, healing and being able to recover

@@ -88,8 +91,8 @@ for i in $(seq 1 10); do
 	./mc admin heal -r --remove --json site2/ 2>&1 >/dev/null
 done

-failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
-failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)

 if [ $failed_count_site1 -ne 0 ]; then
 	echo "failed with multipart on site1 uploads"

@@ -101,15 +104,44 @@ if [ $failed_count_site2 -ne 0 ]; then
 	exit 1
 fi

-docker-compose -f docker-compose-site1.yaml rm -s -f
-docker-compose -f docker-compose-site2.yaml rm -s -f
-for volume in $(docker volume ls -q | grep minio); do
-	docker volume rm ${volume}
-done
-docker system prune -f || true
-docker volume prune -f || true
-docker volume rm $(docker volume ls -q -f dangling=true) || true
+# Add user group test
+./mc admin user add site1 site-replication-issue-user site-replication-issue-password
+./mc admin group add site1 site-replication-issue-group site-replication-issue-user
+
+max_wait_attempts=30
+wait_interval=5
+
+attempt=1
+while true; do
+	diff <(./mc admin group info site1 site-replication-issue-group) <(./mc admin group info site2 site-replication-issue-group)
+
+	if [[ $? -eq 0 ]]; then
+		echo "Outputs are consistent."
+		break
+	fi
+
+	remaining_attempts=$((max_wait_attempts - attempt))
+	if ((attempt >= max_wait_attempts)); then
+		echo "Outputs remain inconsistent after $max_wait_attempts attempts. Exiting with error."
+		exit 1
+	else
+		echo "Outputs are inconsistent. Waiting for $wait_interval seconds (attempt $attempt/$max_wait_attempts)."
+		sleep $wait_interval
+	fi
+
+	((attempt++))
+done
+
+status=$(./mc admin group info site1 site-replication-issue-group --json | jq .groupStatus | tr -d '"')
+
+if [[ $status == "enabled" ]]; then
+	echo "Success"
+else
+	echo "Expected status: enabled, actual status: $status"
+	exit 1
+fi
+
+cleanup

 ## change working directory
 cd ../../../
32 .github/workflows/replication.yaml (vendored)

@@ -3,8 +3,7 @@ name: MinIO advanced tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.

@@ -22,11 +21,11 @@ jobs:

     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]

     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}
          check-latest: true

@@ -36,6 +35,24 @@ jobs:
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make test-decom

+      - name: Test ILM
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-ilm
+
+      - name: Test PBAC
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-pbac
+
+      - name: Test Config File
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-configfile
+
       - name: Test Replication
         run: |
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0

@@ -48,3 +65,8 @@ jobs:
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make test-site-replication-minio

+      - name: Test Versioning
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-versioning
9 .github/workflows/root-disable.yml (vendored)

@@ -3,8 +3,7 @@ name: Root lockdown tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.

@@ -21,12 +20,12 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]

     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
           go-version: ${{ matrix.go-version }}
           check-latest: true
17 .github/workflows/run-mint.sh (vendored)

@@ -16,7 +16,7 @@ docker volume rm $(docker volume ls -f dangling=true) || true
 cd .github/workflows/mint

 docker-compose -f minio-${MODE}.yaml up -d
-sleep 30s
+sleep 1m

 docker system prune -f || true
 docker volume prune -f || true

@@ -26,6 +26,9 @@ docker volume rm $(docker volume ls -q -f dangling=true) || true
 [ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio2
 [ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio6

+# Pause one node, to check that all S3 calls work while one node goes wrong
+[ "${MODE}" == "resiliency" ] && docker-compose -f minio-${MODE}.yaml pause minio4
+
 docker run --rm --net=mint_default \
 	--name="mint-${MODE}-${JOB_NAME}" \
 	-e SERVER_ENDPOINT="nginx:9000" \

@@ -35,6 +38,18 @@ docker run --rm --net=mint_default \
 	-e MINT_MODE="${MINT_MODE}" \
 	docker.io/minio/mint:edge

+# FIXME: enable this after fixing aws-sdk-java-v2 tests
+# # unpause the node, to check that all S3 calls work while one node goes wrong
+# [ "${MODE}" == "resiliency" ] && docker-compose -f minio-${MODE}.yaml unpause minio4
+# [ "${MODE}" == "resiliency" ] && docker run --rm --net=mint_default \
+# 	--name="mint-${MODE}-${JOB_NAME}" \
+# 	-e SERVER_ENDPOINT="nginx:9000" \
+# 	-e ACCESS_KEY="${ACCESS_KEY}" \
+# 	-e SECRET_KEY="${SECRET_KEY}" \
+# 	-e ENABLE_HTTPS=0 \
+# 	-e MINT_MODE="${MINT_MODE}" \
+# 	docker.io/minio/mint:edge
+
 docker-compose -f minio-${MODE}.yaml down || true
 sleep 10s
5 .github/workflows/shfmt.yml (vendored)

@@ -3,8 +3,7 @@ name: Shell formatting checks
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 permissions:
   contents: read

@@ -14,7 +13,7 @@ jobs:
     name: runner / shfmt
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: luizm/action-sh-checker@master
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
15 .github/workflows/typos.yml (vendored, new file)

@@ -0,0 +1,15 @@
+---
+name: Spelling
+on: [pull_request]
+
+jobs:
+  run:
+    name: Spell Check with Typos
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout Actions Repository
+        uses: actions/checkout@v4
+
+      - name: Check spelling of repo
+        uses: crate-ci/typos@master
9 .github/workflows/upgrade-ci-cd.yaml (vendored)

@@ -3,8 +3,7 @@ name: Upgrade old version tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master

 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.

@@ -21,12 +20,12 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.22.x]
         os: [ubuntu-latest]

     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
           go-version: ${{ matrix.go-version }}
           check-latest: true
11 .github/workflows/vulncheck.yml (vendored)

@@ -3,11 +3,10 @@ on:
   pull_request:
     branches:
       - master
-      - next

   push:
     branches:
       - master
       - next

 permissions:
   contents: read # to fetch code (actions/checkout)

@@ -18,15 +17,15 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
-          go-version: 1.21.3
+          go-version: 1.22.4
          check-latest: true
       - name: Get official govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest
         shell: bash
       - name: Run govulncheck
-        run: govulncheck ./...
+        run: govulncheck -show verbose ./...
         shell: bash
10 .gitignore (vendored)

@@ -43,3 +43,13 @@ docs/debugging/inspect/inspect
 docs/debugging/pprofgoparser/pprofgoparser
 docs/debugging/reorder-disks/reorder-disks
 docs/debugging/populate-hard-links/populate-hardlinks
+docs/debugging/xattr/xattr
+hash-set
+healing-bin
+inspect
+pprofgoparser
+reorder-disks
+s3-check-md5
+s3-verify
+xattr
+xl-meta
.golangci.yml

@@ -29,5 +29,8 @@ linters:
 issues:
   exclude-use-default: false
   exclude:
+    - "empty-block:"
+    - "unused-parameter:"
+    - "dot-imports:"
     - should have a package comment
     - error strings should not be capitalized or end with punctuation or a newline
38 .typos.toml (new file)

@@ -0,0 +1,38 @@
+[files]
+extend-exclude = [
+    ".git/",
+    "docs/",
+]
+ignore-hidden = false
+
+[default]
+extend-ignore-re = [
+    "Patrick Collison",
+    "Copyright 2014 Unknwon",
+    "[0-9A-Za-z/+=]{64}",
+    "ZXJuZXQxDjAMBgNVBA-some-junk-Q4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF",
+    "eyJmb28iOiJiYXIifQ",
+    "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.*",
+    "MIIDBTCCAe2gAwIBAgIQWHw7h.*",
+    'http\.Header\{"X-Amz-Server-Side-Encryptio":',
+    "ZoEoZdLlzVbOlT9rbhD7ZN7TLyiYXSAlB79uGEge",
+]
+
+[default.extend-words]
+"encrypter" = "encrypter"
+"kms" = "kms"
+"requestor" = "requestor"
+
+[default.extend-identifiers]
+"HashiCorp" = "HashiCorp"
+
+[type.go.extend-identifiers]
+"bui" = "bui"
+"dm2nd" = "dm2nd"
+"ot" = "ot"
+"ParseND" = "ParseND"
+"ParseNDStream" = "ParseNDStream"
+"pn" = "pn"
+"TestGetPartialObjectMisAligned" = "TestGetPartialObjectMisAligned"
+"thr" = "thr"
+"toi" = "toi"
@@ -9,7 +9,7 @@ ENV CGO_ENABLED 0
 # Install curl and minisign
 RUN apk add -U --no-cache ca-certificates && \
 	apk add -U --no-cache curl && \
-	go install aead.dev/minisign/cmd/minisign@v0.2.0
+	go install aead.dev/minisign/cmd/minisign@v0.2.1

 # Download minio binary and signature file
 RUN curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \

@@ -21,11 +21,16 @@ RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go
 	curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
 	chmod +x /go/bin/mc

+RUN if [ "$TARGETARCH" = "amd64" ]; then \
+	curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
+	chmod +x /go/bin/curl; \
+	fi
+
 # Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
 RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
 	minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav

-FROM registry.access.redhat.com/ubi9/ubi-micro:9.2
+FROM registry.access.redhat.com/ubi9/ubi-micro:latest

 ARG RELEASE

@@ -49,6 +54,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
 COPY --from=build /go/bin/minio /usr/bin/minio
 COPY --from=build /go/bin/mc /usr/bin/mc
+COPY --from=build /go/bin/cur* /usr/bin/

 COPY CREDITS /licenses/CREDITS
 COPY LICENSE /licenses/LICENSE
@@ -9,7 +9,7 @@ ENV CGO_ENABLED 0
 # Install curl and minisign
 RUN apk add -U --no-cache ca-certificates && \
 	apk add -U --no-cache curl && \
-	go install aead.dev/minisign/cmd/minisign@v0.2.0
+	go install aead.dev/minisign/cmd/minisign@v0.2.1

 # Download minio binary and signature file
 RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \

@@ -21,6 +21,11 @@ RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go
 	curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
 	chmod +x /go/bin/mc

+RUN if [ "$TARGETARCH" = "amd64" ]; then \
+	curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
+	chmod +x /go/bin/curl; \
+	fi
+
 # Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
 RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
 	minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav

@@ -49,6 +54,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
 COPY --from=build /go/bin/minio /usr/bin/minio
 COPY --from=build /go/bin/mc /usr/bin/mc
+COPY --from=build /go/bin/cur* /usr/bin/

 COPY CREDITS /licenses/CREDITS
 COPY LICENSE /licenses/LICENSE
@@ -9,17 +9,22 @@ ENV CGO_ENABLED 0
 # Install curl and minisign
 RUN apk add -U --no-cache ca-certificates && \
 	apk add -U --no-cache curl && \
-	go install aead.dev/minisign/cmd/minisign@v0.2.0
+	go install aead.dev/minisign/cmd/minisign@v0.2.1

 # Download minio binary and signature file
 RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips -o /go/bin/minio && \
 	curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.minisig -o /go/bin/minio.minisig && \
 	chmod +x /go/bin/minio

+RUN if [ "$TARGETARCH" = "amd64" ]; then \
+	curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
+	chmod +x /go/bin/curl; \
+	fi
+
 # Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
 RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav

-FROM registry.access.redhat.com/ubi9/ubi-micro:9.2
+FROM registry.access.redhat.com/ubi9/ubi-micro:latest

 ARG RELEASE

@@ -41,6 +46,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \

 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
 COPY --from=build /go/bin/minio /usr/bin/minio
+COPY --from=build /go/bin/cur* /usr/bin/

 COPY CREDITS /licenses/CREDITS
 COPY LICENSE /licenses/LICENSE
@@ -9,7 +9,7 @@ ENV CGO_ENABLED 0
 # Install curl and minisign
 RUN apk add -U --no-cache ca-certificates && \
 	apk add -U --no-cache curl && \
-	go install aead.dev/minisign/cmd/minisign@v0.2.0
+	go install aead.dev/minisign/cmd/minisign@v0.2.1

 # Download minio binary and signature file
 RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \

@@ -21,6 +21,11 @@ RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go
 	curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
 	chmod +x /go/bin/mc

+RUN if [ "$TARGETARCH" = "amd64" ]; then \
+	curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
+	chmod +x /go/bin/curl; \
+	fi
+
 # Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
 RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
 	minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav

@@ -49,6 +54,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
 COPY --from=build /go/bin/minio /usr/bin/minio
 COPY --from=build /go/bin/mc /usr/bin/mc
+COPY --from=build /go/bin/cur* /usr/bin/

 COPY CREDITS /licenses/CREDITS
 COPY LICENSE /licenses/LICENSE
105
Makefile
105
Makefile
@@ -6,9 +6,9 @@ GOARCH := $(shell go env GOARCH)
|
||||
GOOS := $(shell go env GOOS)
|
||||
|
||||
VERSION ?= $(shell git describe --tags)
|
||||
TAG ?= "quay.io/minio/minio:$(VERSION)"
|
||||
REPO ?= quay.io/minio
|
||||
TAG ?= $(REPO)/minio:$(VERSION)
|
||||
|
||||
GOLANGCI_VERSION = v1.51.2
|
||||
GOLANGCI_DIR = .bin/golangci/$(GOLANGCI_VERSION)
|
||||
GOLANGCI = $(GOLANGCI_DIR)/golangci-lint
|
||||
|
||||
@@ -23,8 +23,8 @@ help: ## print this help
|
||||
|
||||
getdeps: ## fetch necessary dependencies
|
||||
@mkdir -p ${GOPATH}/bin
|
||||
@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOLANGCI_DIR) $(GOLANGCI_VERSION)
|
||||
@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.7
|
||||
@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOLANGCI_DIR)
|
||||
@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.10-0.20240227114326-6d6f813fff1b
|
||||
@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest
|
||||
|
||||
crosscompile: ## cross compile minio
|
||||
@@ -45,20 +45,36 @@ lint-fix: getdeps ## runs golangci-lint suite of linters with automatic fixes
@$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml --fix

check: test
test: verifiers build build-debugging ## builds minio, runs linters, tests
test: verifiers build ## builds minio, runs linters, tests
@echo "Running unit tests"
@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -tags kqueue ./...
@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -v -tags kqueue,dev ./...

test-root-disable: install-race
@echo "Running minio root lockdown tests"
@env bash $(PWD)/buildscripts/disable-root.sh

test-ilm: install-race
@echo "Running ILM tests"
@env bash $(PWD)/docs/bucket/replication/setup_ilm_expiry_replication.sh

test-pbac: install-race
@echo "Running bucket policies tests"
@env bash $(PWD)/docs/iam/policies/pbac-tests.sh

test-decom: install-race
@echo "Running minio decom tests"
@env bash $(PWD)/docs/distributed/decom.sh
@env bash $(PWD)/docs/distributed/decom-encrypted.sh
@env bash $(PWD)/docs/distributed/decom-encrypted-sse-s3.sh
@env bash $(PWD)/docs/distributed/decom-compressed-sse-s3.sh
@env bash $(PWD)/docs/distributed/decom-encrypted-kes.sh

test-versioning: install-race
@echo "Running minio versioning tests"
@env bash $(PWD)/docs/bucket/versioning/versioning-tests.sh

test-configfile: install-race
@env bash $(PWD)/docs/distributed/distributed-from-config-file.sh

test-upgrade: install-race
@echo "Running minio upgrade tests"
@@ -68,11 +84,15 @@ test-race: verifiers build ## builds minio, runs linters, tests (race)
@echo "Running unit tests under -race"
@(env bash $(PWD)/buildscripts/race.sh)

test-iam: build ## verify IAM (external IDP, etcd backends)
test-iam: install-race ## verify IAM (external IDP, etcd backends)
@echo "Running tests for IAM (external IDP, etcd backends)"
@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -tags kqueue -v -run TestIAM* ./cmd
@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -tags kqueue,dev -v -run TestIAM* ./cmd
@echo "Running tests for IAM (external IDP, etcd backends) with -race"
@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd
@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue,dev -v -run TestIAM* ./cmd

test-iam-ldap-upgrade-import: install-race ## verify IAM (external LDAP IDP)
@echo "Running upgrade tests for IAM (LDAP backend)"
@env bash $(PWD)/buildscripts/minio-iam-ldap-upgrade-import-test.sh

test-sio-error:
@(env bash $(PWD)/docs/bucket/replication/sio-error.sh)
@@ -86,7 +106,10 @@ test-replication-3site:
test-delete-replication:
@(env bash $(PWD)/docs/bucket/replication/delete-replication.sh)

test-replication: install-race test-replication-2site test-replication-3site test-delete-replication test-sio-error ## verify multi site replication
test-delete-marker-proxying:
@(env bash $(PWD)/docs/bucket/replication/test_del_marker_proxying.sh)

test-replication: install-race test-replication-2site test-replication-3site test-delete-replication test-sio-error test-delete-marker-proxying ## verify multi site replication
@echo "Running tests for replicating three sites"

test-site-replication-ldap: install-race ## verify automatic site replication
@@ -100,38 +123,39 @@ test-site-replication-oidc: install-race ## verify automatic site replication
test-site-replication-minio: install-race ## verify automatic site replication
@echo "Running tests for automatic site replication of IAM (with MinIO IDP)"
@(env bash $(PWD)/docs/site-replication/run-multi-site-minio-idp.sh)
@echo "Running tests for automatic site replication of SSE-C objects"
@(env bash $(PWD)/docs/site-replication/run-ssec-object-replication.sh)
@echo "Running tests for automatic site replication of SSE-C objects with SSE-KMS enabled for bucket"
@(env bash $(PWD)/docs/site-replication/run-sse-kms-object-replication.sh)
@echo "Running tests for automatic site replication of SSE-C objects with compression enabled for site"
@(env bash $(PWD)/docs/site-replication/run-ssec-object-replication-with-compression.sh)

verify: ## verify minio various setups
verify: install-race ## verify minio various setups
@echo "Verifying build with race"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-build.sh)

verify-healing: ## verify healing and replacing disks with minio binary
verify-healing: install-race ## verify healing and replacing disks with minio binary
@echo "Verify healing build with race"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-healing.sh)
@(env bash $(PWD)/buildscripts/unaligned-healing.sh)
@(env bash $(PWD)/buildscripts/verify-healing-empty-erasure-set.sh)
@(env bash $(PWD)/buildscripts/heal-inconsistent-versions.sh)

verify-healing-with-root-disks: ## verify healing root disks
verify-healing-with-root-disks: install-race ## verify healing root disks
@echo "Verify healing with root drives"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-healing-with-root-disks.sh)

verify-healing-with-rewrite: ## verify healing to rewrite old xl.meta -> new xl.meta
verify-healing-with-rewrite: install-race ## verify healing to rewrite old xl.meta -> new xl.meta
@echo "Verify healing with rewrite"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/rewrite-old-new.sh)

verify-healing-inconsistent-versions: ## verify resolving inconsistent versions
verify-healing-inconsistent-versions: install-race ## verify resolving inconsistent versions
@echo "Verify resolving inconsistent versions build with race"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/resolve-right-versions.sh)

build-debugging:
@(env bash $(PWD)/docs/debugging/build.sh)

build: checks ## builds minio to $(PWD)
build: checks build-debugging ## builds minio to $(PWD)
@echo "Building minio binary to './minio'"
@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null

@@ -139,16 +163,24 @@ hotfix-vars:
$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#')))
$(eval VERSION := $(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD))
$(eval TAG := "minio/minio:$(VERSION)")

hotfix: hotfix-vars install ## builds minio binary with hotfix tags
@mv -f ./minio ./minio.$(VERSION)
@minisign -qQSm ./minio.$(VERSION) -s "${CRED_DIR}/minisign.key" < "${CRED_DIR}/minisign-passphrase"
@sha256sum < ./minio.$(VERSION) | sed 's, -,minio.$(VERSION),g' > minio.$(VERSION).sha256sum
hotfix: hotfix-vars clean install ## builds minio binary with hotfix tags
@wget -q -c https://github.com/minio/pkger/releases/download/v2.3.1/pkger_2.3.1_linux_amd64.deb
@wget -q -c https://raw.githubusercontent.com/minio/minio-service/v1.0.1/linux-systemd/distributed/minio.service
@sudo apt install ./pkger_2.3.1_linux_amd64.deb --yes
@mkdir -p minio-release/$(GOOS)-$(GOARCH)/archive
@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio
@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION)
@minisign -qQSm minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION) -s "${CRED_DIR}/minisign.key" < "${CRED_DIR}/minisign-passphrase"
@sha256sum < minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION) | sed 's, -,minio.$(VERSION),g' > minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION).sha256sum
@cp -af minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION)* minio-release/$(GOOS)-$(GOARCH)/archive/
@pkger -r $(VERSION) --ignore

hotfix-push: hotfix
@scp -q -r minio.$(VERSION)* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive/
@scp -q -r minio.$(VERSION)* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive/
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive
@echo "Published new hotfix binaries at https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.$(VERSION)"

docker-hotfix-push: docker-hotfix
@@ -162,15 +194,15 @@ docker: build ## builds minio docker container
@echo "Building minio docker image '$(TAG)'"
@docker build -q --no-cache -t $(TAG) . -f Dockerfile

install-race: checks ## builds minio to $(PWD)
@echo "Building minio binary to './minio'"
@GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
install-race: checks build-debugging ## builds minio to $(PWD)
@echo "Building minio binary with -race to './minio'"
@GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue,dev -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@echo "Installing minio binary with -race to '$(GOPATH)/bin/minio'"
@mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio

install: build ## builds minio and installs it to $GOPATH/bin.
@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
@mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio
@echo "Installation successful. To learn more, try \"minio --help\"."

clean: ## cleanup all generated assets
@@ -183,3 +215,6 @@ clean: ## cleanup all generated assets
@rm -rvf build
@rm -rvf release
@rm -rvf .verify*
@rm -rvf minio-release
@rm -rvf minio.RELEASE*.hotfix.*
@rm -rvf pkger_*.deb

@@ -123,7 +123,7 @@ You can also connect using any S3-compatible tool, such as the MinIO Client `mc`

## Install from Source

Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.19](https://golang.org/dl/#stable)
Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.21](https://golang.org/dl/#stable)

```sh
go install github.com/minio/minio@latest
```

@@ -210,10 +210,6 @@ For deployments behind a load balancer, proxy, or ingress rule where the MinIO h

For example, consider a MinIO deployment behind a proxy `https://minio.example.net`, `https://console.minio.example.net` with rules for forwarding traffic on port :9000 and :9001 to MinIO and the MinIO Console respectively on the internal network. Set `MINIO_BROWSER_REDIRECT_URL` to `https://console.minio.example.net` to ensure the browser receives a valid reachable URL.

Similarly, if your TLS certificates do not have the IP SAN for the MinIO server host, the MinIO Console may fail to validate the connection to the server. Use the `MINIO_SERVER_URL` environment variable and specify the proxy-accessible hostname of the MinIO server to allow the Console to use the MinIO server API using the TLS certificate.

For example: `export MINIO_SERVER_URL="https://minio.example.net"`
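
Taken together, a proxied deployment typically exports both variables before starting the server. The following is a minimal sketch reusing the example hostnames above; the console port and data path are illustrative assumptions, not part of the original text:

```sh
# Proxy-facing URLs, reusing the example hostnames above.
export MINIO_BROWSER_REDIRECT_URL="https://console.minio.example.net"
export MINIO_SERVER_URL="https://minio.example.net"

# Start the server with the Console on :9001; /mnt/data is a placeholder path.
minio server --console-address ":9001" /mnt/data
```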

| Dashboard | Creating a bucket |
| ------------- | ------------- |
|  |  |
buildscripts/checkdeps.sh (Normal file → Executable file)
@@ -9,7 +9,7 @@ function _init() {
export CGO_ENABLED=0

## List of architectures and OS to test cross compilation.
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips"
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64"
}

function _build() {

@@ -32,6 +32,7 @@ fi
set +e

export MC_HOST_minioadm=http://minioadmin:minioadmin@localhost:9100/
./mc ready minioadm

./mc ls minioadm/

@@ -56,7 +57,7 @@ done

set +e

sleep 10
./mc ready minioadm/

./mc ls minioadm/
if [ $? -ne 0 ]; then
@@ -81,11 +82,12 @@ minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &

sleep 20s

export MC_HOST_sitea=http://minioadmin:minioadmin@127.0.0.1:9001
export MC_HOST_siteb=http://minioadmin:minioadmin@127.0.0.1:9004

./mc ready sitea
./mc ready siteb

./mc admin replicate add sitea siteb

./mc admin user add sitea foobar foo12345
@@ -109,11 +111,12 @@ minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &

sleep 20s

export MC_HOST_sitea=http://foobar:foo12345@127.0.0.1:9001
export MC_HOST_siteb=http://foobar:foo12345@127.0.0.1:9004

./mc ready sitea
./mc ready siteb

./mc admin user add sitea foobar-admin foo12345

sleep 2s

@@ -44,7 +44,6 @@ func main() {
opts := madmin.HealOpts{
Recursive: true, // recursively heal all objects at 'prefix'
Remove: true, // remove content that has lost quorum and not recoverable
Recreate: true, // rewrite all old non-inlined xl.meta to new xl.meta
ScanMode: madmin.HealNormalScan, // by default do not do 'deep' scanning
}

buildscripts/minio-iam-ldap-upgrade-import-test.sh (new executable file, 127 lines)
@@ -0,0 +1,127 @@
#!/bin/bash

# This script is used to test the migration of IAM content from an old minio
# instance to a new minio instance.
#
# To run it locally, start the LDAP server in the github.com/minio/minio-iam-testing
# repo (e.g. make podman-run), and then run this script.
#
# This script assumes that the LDAP server is at:
#
# `localhost:1389`
#
# If this is not the case, set the environment variable
# `_MINIO_LDAP_TEST_SERVER`.
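#
# Hypothetical local invocation (the checkout location of minio-iam-testing
# is an assumption, not part of this script):
#
#   (cd ../minio-iam-testing && make podman-run)  # start the test LDAP server
#   _MINIO_LDAP_TEST_SERVER=localhost:1389 \
#       bash buildscripts/minio-iam-ldap-upgrade-import-test.sh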

OLD_VERSION=RELEASE.2024-03-26T22-10-45Z
OLD_BINARY_LINK=https://dl.min.io/server/minio/release/linux-amd64/archive/minio.${OLD_VERSION}

__init__() {
if which curl &>/dev/null; then
echo "curl is already installed"
else
echo "Installing curl:"
sudo apt install curl -y
fi

export GOPATH=/tmp/gopath
export PATH="${PATH}":"${GOPATH}"/bin

if which mc &>/dev/null; then
echo "mc is already installed"
else
echo "Installing mc:"
go install github.com/minio/mc@latest
fi

if [ ! -x ./minio.${OLD_VERSION} ]; then
echo "Downloading minio.${OLD_VERSION} binary"
curl -o minio.${OLD_VERSION} ${OLD_BINARY_LINK}
chmod +x minio.${OLD_VERSION}
fi

if [ -z "$_MINIO_LDAP_TEST_SERVER" ]; then
export _MINIO_LDAP_TEST_SERVER=localhost:1389
echo "Using default LDAP endpoint: $_MINIO_LDAP_TEST_SERVER"
fi

rm -rf /tmp/data
}

create_iam_content_in_old_minio() {
echo "Creating IAM content in old minio instance."

MINIO_CI_CD=1 ./minio.${OLD_VERSION} server /tmp/data/{1...4} &
sleep 5

set -x
mc alias set old-minio http://localhost:9000 minioadmin minioadmin
mc ready old-minio
mc idp ldap add old-minio \
server_addr=localhost:1389 \
server_insecure=on \
lookup_bind_dn=cn=admin,dc=min,dc=io \
lookup_bind_password=admin \
user_dn_search_base_dn=dc=min,dc=io \
user_dn_search_filter="(uid=%s)" \
group_search_base_dn=ou=swengg,dc=min,dc=io \
group_search_filter="(&(objectclass=groupOfNames)(member=%d))"
mc admin service restart old-minio

mc idp ldap policy attach old-minio readwrite --user=UID=dillon,ou=people,ou=swengg,dc=min,dc=io
mc idp ldap policy attach old-minio readwrite --group=CN=project.c,ou=groups,ou=swengg,dc=min,dc=io

mc idp ldap policy entities old-minio

mc admin cluster iam export old-minio
set +x

mc admin service stop old-minio
}

import_iam_content_in_new_minio() {
echo "Importing IAM content in new minio instance."
# Assume current minio binary exists.
MINIO_CI_CD=1 ./minio server /tmp/data/{1...4} &
sleep 5

set -x
mc alias set new-minio http://localhost:9000 minioadmin minioadmin
echo "BEFORE IMPORT mappings:"
mc ready new-minio
mc idp ldap policy entities new-minio
mc admin cluster iam import new-minio ./old-minio-iam-info.zip
echo "AFTER IMPORT mappings:"
mc idp ldap policy entities new-minio
set +x

# mc admin service stop new-minio
}

verify_iam_content_in_new_minio() {
output=$(mc idp ldap policy entities new-minio --json)

groups=$(echo "$output" | jq -r '.result.policyMappings[] | select(.policy == "readwrite") | .groups[]')
if [ "$groups" != "cn=project.c,ou=groups,ou=swengg,dc=min,dc=io" ]; then
echo "Failed to verify groups: $groups"
exit 1
fi

users=$(echo "$output" | jq -r '.result.policyMappings[] | select(.policy == "readwrite") | .users[]')
if [ "$users" != "uid=dillon,ou=people,ou=swengg,dc=min,dc=io" ]; then
echo "Failed to verify users: $users"
exit 1
fi

mc admin service stop new-minio
}

main() {
create_iam_content_in_old_minio

import_iam_content_in_new_minio

verify_iam_content_in_new_minio
}

(__init__ "$@" && main "$@")

buildscripts/minio-upgrade.sh (Normal file → Executable file)
@@ -4,10 +4,22 @@ trap 'cleanup $LINENO' ERR

# shellcheck disable=SC2120
cleanup() {
MINIO_VERSION=dev docker-compose \
MINIO_VERSION=dev /tmp/gopath/bin/docker-compose \
-f "buildscripts/upgrade-tests/compose.yml" \
rm -s -f
down || true

MINIO_VERSION=dev /tmp/gopath/bin/docker-compose \
-f "buildscripts/upgrade-tests/compose.yml" \
rm || true

for volume in $(docker volume ls -q | grep upgrade); do
docker volume rm ${volume} || true
done

docker volume prune -f
docker system prune -f || true
docker volume prune -f || true
docker volume rm $(docker volume ls -q -f dangling=true) || true
}

verify_checksum_after_heal() {
@@ -55,6 +67,13 @@ __init__() {

go install github.com/minio/mc@latest

## this is needed because github actions don't have
## docker-compose on all runners
go install github.com/docker/compose/v2/cmd@latest
mv -v /tmp/gopath/bin/cmd /tmp/gopath/bin/docker-compose

cleanup

TAG=minio/minio:dev make docker

MINIO_VERSION=RELEASE.2019-12-19T22-52-26Z docker-compose \
@@ -72,11 +91,11 @@ __init__() {

curl -s http://127.0.0.1:9000/minio-test/to-read/hosts | sha256sum

MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
MINIO_VERSION=dev /tmp/gopath/bin/docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
}

main() {
MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build
MINIO_VERSION=dev /tmp/gopath/bin/docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build

add_alias

@@ -45,7 +45,8 @@ function verify_rewrite() {
"${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 10

"${WORK_DIR}/mc" ready minio/

if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
@@ -77,7 +78,8 @@ function verify_rewrite() {
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 10

"${WORK_DIR}/mc" ready minio/

if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
@@ -87,13 +89,12 @@ function verify_rewrite() {
exit 1
fi

go install github.com/minio/minio/docs/debugging/s3-check-md5@latest
if ! s3-check-md5 \
if ! ./s3-check-md5 \
-debug \
-versions \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
-endpoint "http://127.0.0.1:${start_port}/" 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
@@ -113,7 +114,7 @@ function verify_rewrite() {
go run ./buildscripts/heal-manual.go "127.0.0.1:${start_port}" "minio" "minio123"
sleep 1

if ! s3-check-md5 \
if ! ./s3-check-md5 \
-debug \
-versions \
-access-key minio \

@@ -1,177 +0,0 @@
#!/bin/bash -e
#

set -E
set -o pipefail
set -x

if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO_OLD=("$PWD/minio.RELEASE.2021-11-24T23-19-33Z" --config-dir "$MINIO_CONFIG_DIR" server)
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)

function download_old_release() {
if [ ! -f minio.RELEASE.2021-11-24T23-19-33Z ]; then
curl --silent -O https://dl.minio.io/server/minio/release/linux-amd64/archive/minio.RELEASE.2021-11-24T23-19-33Z
chmod a+x minio.RELEASE.2021-11-24T23-19-33Z
fi
}

function start_minio_16drive() {
start_port=$1

export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
export _MINIO_SHARD_DISKTIME_DELTA="5s" # do not change this as its needed for tests
export MINIO_CI_CD=1

MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
fi

(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")

# remove mc source.
purge "${MC_BUILD_DIR}"

"${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 30

if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi

shred --iterations=1 --size=5241856 - 1>"${WORK_DIR}/unaligned" 2>/dev/null
"${WORK_DIR}/mc" mb minio/healing-shard-bucket --quiet
"${WORK_DIR}/mc" cp \
"${WORK_DIR}/unaligned" \
minio/healing-shard-bucket/unaligned \
--disable-multipart --quiet

## "unaligned" object name gets consistently distributed
## to disks in following distribution order
##
## NOTE: if you change the name make sure to change the
## distribution order present here
##
## [15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]

## make sure to remove the "last" data shard
rm -rf "${WORK_DIR}/xl14/healing-shard-bucket/unaligned"
sleep 10
## Heal the shard
"${WORK_DIR}/mc" admin heal --quiet --recursive minio/healing-shard-bucket
## then remove any other data shard let's pick first disk
## - 1st data shard.
rm -rf "${WORK_DIR}/xl3/healing-shard-bucket/unaligned"
sleep 10

go install github.com/minio/minio/docs/debugging/s3-check-md5@latest
if ! s3-check-md5 \
-debug \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep CORRUPTED; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi

pkill minio
sleep 3

"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 30

if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi

if ! s3-check-md5 \
-debug \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
mkdir -p inspects
(
cd inspects
"${WORK_DIR}/mc" support inspect minio/healing-shard-bucket/unaligned/**
)

"${WORK_DIR}/mc" mb play/inspects
"${WORK_DIR}/mc" mirror inspects play/inspects

purge "$WORK_DIR"
exit 1
fi

"${WORK_DIR}/mc" admin heal --quiet --recursive minio/healing-shard-bucket

if ! s3-check-md5 \
-debug \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
mkdir -p inspects
(
cd inspects
"${WORK_DIR}/mc" support inspect minio/healing-shard-bucket/unaligned/**
)

"${WORK_DIR}/mc" mb play/inspects
"${WORK_DIR}/mc" mirror inspects play/inspects

purge "$WORK_DIR"
exit 1
fi

pkill minio
sleep 3
}

function main() {
download_old_release

start_port=$(shuf -i 10000-65000 -n 1)

start_minio_16drive ${start_port}
}

function purge() {
rm -rf "$1"
}

(main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"

@@ -1,5 +1,3 @@
version: '3.7'

# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: minio/minio:${MINIO_VERSION}

@@ -15,13 +15,14 @@ WORK_DIR="$PWD/.verify-$RANDOM"
export MINT_MODE=core
export MINT_DATA_DIR="$WORK_DIR/data"
export SERVER_ENDPOINT="127.0.0.1:9000"
export MC_HOST_verify="http://minio:minio123@${SERVER_ENDPOINT}/"
export MC_HOST_verify_ipv6="http://minio:minio123@[::1]:9000/"
export ACCESS_KEY="minio"
export SECRET_KEY="minio123"
export ENABLE_HTTPS=0
export GO111MODULE=on
export GOGC=25
export ENABLE_ADMIN=1

export MINIO_CI_CD=1

MINIO_CONFIG_DIR="$WORK_DIR/.minio"
@@ -36,18 +37,21 @@ function start_minio_fs() {
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
"${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
sleep 10

"${WORK_DIR}/mc" ready verify
}

function start_minio_erasure() {
"${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
sleep 15

"${WORK_DIR}/mc" ready verify
}

function start_minio_erasure_sets() {
export MINIO_ENDPOINTS="${WORK_DIR}/erasure-disk-sets{1...32}"
"${MINIO[@]}" server >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
sleep 15

"${WORK_DIR}/mc" ready verify
}

function start_minio_pool_erasure_sets() {
@@ -57,7 +61,7 @@ function start_minio_pool_erasure_sets() {
"${MINIO[@]}" server --address ":9000" >"$WORK_DIR/pool-minio-9000.log" 2>&1 &
"${MINIO[@]}" server --address ":9001" >"$WORK_DIR/pool-minio-9001.log" 2>&1 &

sleep 40
"${WORK_DIR}/mc" ready verify
}

function start_minio_pool_erasure_sets_ipv6() {
@@ -67,7 +71,7 @@ function start_minio_pool_erasure_sets_ipv6() {
"${MINIO[@]}" server --address="[::1]:9000" >"$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
"${MINIO[@]}" server --address="[::1]:9001" >"$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &

sleep 40
"${WORK_DIR}/mc" ready verify_ipv6
}

function start_minio_dist_erasure() {
@@ -78,7 +82,7 @@ function start_minio_dist_erasure() {
"${MINIO[@]}" server --address ":900${i}" >"$WORK_DIR/dist-minio-900${i}.log" 2>&1 &
done

sleep 40
"${WORK_DIR}/mc" ready verify
}

function run_test_fs() {
@@ -222,7 +226,7 @@ function __init__() {
exit 1
fi

(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
(cd "${MC_BUILD_DIR}" && go build -o "${WORK_DIR}/mc")

# remove mc source.
purge "${MC_BUILD_DIR}"

buildscripts/verify-healing-empty-erasure-set.sh (new executable file, 151 lines)
@@ -0,0 +1,151 @@
#!/bin/bash -e
#

set -E
set -o pipefail

if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)

function start_minio_3_node() {
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MINIO_ERASURE_SET_DRIVE_COUNT=6
export MINIO_CI_CD=1

start_port=$1
args=""
for i in $(seq 1 3); do
args="$args http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/1/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/2/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/3/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/4/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/5/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/6/"
done

"${MINIO[@]}" --address ":$((start_port + 1))" $args >"${WORK_DIR}/dist-minio-server1.log" 2>&1 &
pid1=$!
disown ${pid1}

"${MINIO[@]}" --address ":$((start_port + 2))" $args >"${WORK_DIR}/dist-minio-server2.log" 2>&1 &
pid2=$!
disown $pid2

"${MINIO[@]}" --address ":$((start_port + 3))" $args >"${WORK_DIR}/dist-minio-server3.log" 2>&1 &
pid3=$!
disown $pid3

export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"

timeout 15m /tmp/mc ready myminio || fail

# Wait for all drives to be online and formatted
while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].state | select(. != "ok")' | wc -l) -gt 0 ]; do sleep 1; done
# Wait for all drives to be healed
while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].healing | select(. != null) | select(. == true)' | wc -l) -gt 0 ]; do sleep 1; done

# Wait for Status: in MinIO output
while true; do
rv=$(check_online)
if [ "$rv" != "1" ]; then
# success
break
fi

# Check if we should retry
retry=$((retry + 1))
if [ $retry -le 20 ]; then
sleep 5
continue
fi

# Failure
fail
done

if ! ps -p $pid1 1>&2 >/dev/null; then
echo "minio-server-1 is not running." && fail
fi

if ! ps -p $pid2 1>&2 >/dev/null; then
echo "minio-server-2 is not running." && fail
fi

if ! ps -p $pid3 1>&2 >/dev/null; then
echo "minio-server-3 is not running." && fail
fi

if ! pkill minio; then
fail
fi

sleep 1
if pgrep minio; then
# forcibly killing, to proceed further properly.
if ! pkill -9 minio; then
echo "no minio process running anymore, proceed."
fi
fi
}

function fail() {
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
}

function check_online() {
if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then
echo "1"
fi
}

function purge() {
echo rm -rf "$1"
}

function __init__() {
echo "Initializing environment"
mkdir -p "$WORK_DIR"
mkdir -p "$MINIO_CONFIG_DIR"

## version is purposefully set to '3' for minio to migrate configuration file
echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"

if [ ! -f /tmp/mc ]; then
wget --quiet -O /tmp/mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x /tmp/mc
fi
}

function perform_test() {
start_minio_3_node $2

echo "Testing Distributed Erasure setup healing of drives"
echo "Remove the contents of the disks belonging to '${1}' erasure set"

rm -rf ${WORK_DIR}/${1}/*/

set -x
start_minio_3_node $2
}

function main() {
# use same ports for all tests
start_port=$(shuf -i 10000-65000 -n 1)

perform_test "2" ${start_port}
perform_test "1" ${start_port}
perform_test "3" ${start_port}
}

(__init__ "$@" && main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"
@@ -12,17 +12,26 @@ fi
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
GOPATH=/tmp/gopath

function start_minio_3_node() {
for i in $(seq 1 3); do
rm "${WORK_DIR}/dist-minio-server$i.log"
done

export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MINIO_ERASURE_SET_DRIVE_COUNT=6
export MINIO_CI_CD=1

start_port=$2
first_time=$(find ${WORK_DIR}/ | grep format.json | wc -l)

start_port=$1
args=""
for i in $(seq 1 3); do
args="$args http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/1/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/2/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/3/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/4/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/5/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/6/"
for d in $(seq 1 3 5); do
args="$args http://127.0.0.1:$((start_port + 1))${WORK_DIR}/1/${d}/ http://127.0.0.1:$((start_port + 2))${WORK_DIR}/2/${d}/ http://127.0.0.1:$((start_port + 3))${WORK_DIR}/3/${d}/ "
d=$((d + 1))
args="$args http://127.0.0.1:$((start_port + 1))${WORK_DIR}/1/${d}/ http://127.0.0.1:$((start_port + 2))${WORK_DIR}/2/${d}/ http://127.0.0.1:$((start_port + 3))${WORK_DIR}/3/${d}/ "
done

"${MINIO[@]}" --address ":$((start_port + 1))" $args >"${WORK_DIR}/dist-minio-server1.log" 2>&1 &
@@ -37,40 +46,26 @@ function start_minio_3_node() {
pid3=$!
disown $pid3

sleep "$1"
export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"
timeout 15m /tmp/mc ready myminio || fail

[ ${first_time} -eq 0 ] && upload_objects
[ ${first_time} -ne 0 ] && sleep 120

if ! ps -p $pid1 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/dist-minio-server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
echo "minio server 1 is not running" && fail
fi

if ! ps -p $pid2 1>&2 >/dev/null; then
echo "server2 log:"
cat "${WORK_DIR}/dist-minio-server2.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
echo "minio server 2 is not running" && fail
fi

if ! ps -p $pid3 1>&2 >/dev/null; then
echo "server3 log:"
cat "${WORK_DIR}/dist-minio-server3.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
echo "minio server 3 is not running" && fail
fi

if ! pkill minio; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fail
fi

sleep 1
@@ -82,16 +77,40 @@ function start_minio_3_node() {
fi
}

function check_online() {
if grep -q 'Unable to initialize sub-systems' ${WORK_DIR}/dist-minio-*.log; then
echo "1"
function check_heal() {
if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then
return 1
fi

for ((i = 0; i < 20; i++)); do
test -f ${WORK_DIR}/$1/1/.minio.sys/format.json
v1=$?
nextInES=$(($1 + 1)) && [ $nextInES -gt 3 ] && nextInES=1
foundFiles1=$(find ${WORK_DIR}/$1/1/ | grep -v .minio.sys | grep xl.meta | wc -l)
foundFiles2=$(find ${WORK_DIR}/$nextInES/1/ | grep -v .minio.sys | grep xl.meta | wc -l)
test $foundFiles1 -eq $foundFiles2
v2=$?
[ $v1 == 0 -a $v2 == 0 ] && return 0
sleep 10
done
return 1
}

function purge() {
rm -rf "$1"
}

function fail() {
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
pkill -9 minio
echo "FAILED"
purge "$WORK_DIR"
exit 1
}

function __init__() {
echo "Initializing environment"
mkdir -p "$WORK_DIR"
@@ -99,28 +118,37 @@ function __init__() {

## version is purposefully set to '3' for minio to migrate configuration file
echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"

if [ ! -f /tmp/mc ]; then
wget --quiet -O /tmp/mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x /tmp/mc
fi
}

function upload_objects() {
/tmp/mc mb myminio/testbucket/
for ((i = 0; i < 20; i++)); do
echo "my content" | /tmp/mc pipe myminio/testbucket/file-$i
done
}

function perform_test() {
start_minio_3_node 120 $2
start_port=$2

start_minio_3_node $start_port

echo "Testing Distributed Erasure setup healing of drives"
echo "Remove the contents of the disks belonging to '${1}' erasure set"
echo "Remove the contents of the disks belonging to '${1}' node"

rm -rf ${WORK_DIR}/${1}/*/

start_minio_3_node 120 $2
set -x
start_minio_3_node $start_port

rv=$(check_online)
check_heal ${1}
rv=$?
if [ "$rv" == "1" ]; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
pkill -9 minio
echo "FAILED"
purge "$WORK_DIR"
exit 1
fail
fi
}

@@ -25,7 +25,7 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)

// Data types used for returning dummy access control

@@ -31,7 +31,7 @@ import (

jsoniter "github.com/json-iterator/go"
"github.com/klauspost/compress/zip"
"github.com/minio/kes-go"
"github.com/minio/kms-go/kes"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/bucket/lifecycle"
@@ -39,9 +39,8 @@ import (
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)

const (
@@ -94,12 +93,12 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
Quota: data,
UpdatedAt: updatedAt,
}
if quotaConfig.Size == 0 || quotaConfig.Quota == 0 {
if quotaConfig.Size == 0 && quotaConfig.Quota == 0 {
bucketMeta.Quota = nil
}

// Call site replication hook.
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta))
replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta))

// Write success response.
writeSuccessResponseHeadersOnly(w)
@@ -431,7 +430,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
case bucketNotificationConfig:
config, err := globalBucketMetadataSys.GetNotificationConfig(bucket)
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
@@ -447,7 +446,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
if errors.Is(err, BucketLifecycleNotFound{Bucket: bucket}) {
continue
}
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
@@ -680,24 +679,17 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
if _, ok := bucketMap[bucket]; !ok {
opts := MakeBucketOptions{
LockEnabled: config.Enabled(),
ForceCreate: true, // ignore if it already exists
}
err = objectAPI.MakeBucket(ctx, bucket, opts)
if err != nil {
if _, ok := err.(BucketExists); !ok {
rpt.SetStatus(bucket, fileName, err)
continue
}
rpt.SetStatus(bucket, fileName, err)
continue
}
v := newBucketMetadata(bucket)
v, _ := globalBucketMetadataSys.Get(bucket)
bucketMap[bucket] = &v
}

// Deny object locking configuration settings on existing buckets without object lock enabled.
if _, _, err = globalBucketMetadataSys.GetObjectLockConfig(bucket); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}

bucketMap[bucket].ObjectLockConfigXML = configData
bucketMap[bucket].ObjectLockConfigUpdatedAt = updatedAt
rpt.SetStatus(bucket, fileName, nil)
@@ -724,13 +716,13 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
if _, ok := bucketMap[bucket]; !ok {
if err = objectAPI.MakeBucket(ctx, bucket, MakeBucketOptions{}); err != nil {
if _, ok := err.(BucketExists); !ok {
rpt.SetStatus(bucket, fileName, err)
continue
}
if err = objectAPI.MakeBucket(ctx, bucket, MakeBucketOptions{
ForceCreate: true, // ignore if it already exists
}); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
v := newBucketMetadata(bucket)
v, _ := globalBucketMetadataSys.Get(bucket)
bucketMap[bucket] = &v
}

@@ -743,7 +735,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
rpt.SetStatus(bucket, fileName, fmt.Errorf("An Object Lock configuration is present on this bucket, so the versioning state cannot be suspended."))
continue
}
if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
if rcfg, _ := getReplicationConfig(ctx, bucket); rcfg != nil && v.Suspended() {
rpt.SetStatus(bucket, fileName, fmt.Errorf("A replication configuration is present on this bucket, so the versioning state cannot be suspended."))
continue
}
@@ -776,14 +768,14 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *

// create bucket if it does not exist yet.
if _, ok := bucketMap[bucket]; !ok {
err = objectAPI.MakeBucket(ctx, bucket, MakeBucketOptions{})
err = objectAPI.MakeBucket(ctx, bucket, MakeBucketOptions{
ForceCreate: true, // ignore if it already exists
})
if err != nil {
if _, ok := err.(BucketExists); !ok {
rpt.SetStatus(bucket, "", err)
continue
}
rpt.SetStatus(bucket, "", err)
continue
}
v := newBucketMetadata(bucket)
v, _ := globalBucketMetadataSys.Get(bucket)
bucketMap[bucket] = &v
}
if _, ok := bucketMap[bucket]; !ok {
@@ -791,7 +783,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
}
switch fileName {
case bucketNotificationConfig:
config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region, globalEventNotifier.targetList)
config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region(), globalEventNotifier.targetList)
if err != nil {
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
continue
@@ -845,9 +837,13 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
rpt.SetStatus(bucket, fileName, err)
continue
}

rcfg, err := globalBucketObjectLockSys.Get(bucket)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
// Validate the received bucket policy document
if err = bucketLifecycle.Validate(); err != nil {
if err = bucketLifecycle.Validate(rcfg); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
@@ -882,8 +878,10 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
}
kmsKey := encConfig.KeyID()
if kmsKey != "" {
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
_, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
_, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: kmsKey,
AssociatedData: kms.Context{"MinIO admin API": "ServerInfoHandler"}, // Context for a test key operation
})
if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) {
rpt.SetStatus(bucket, fileName, errKMSKeyNotFound)

@@ -23,11 +23,11 @@ import (
"fmt"
"net/http"

"github.com/minio/kes-go"
"github.com/minio/kms-go/kes"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/config"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)

// validateAdminReq will validate request against and return whether it is allowed.

@@ -37,7 +37,7 @@ import (
"github.com/minio/minio/internal/config/subnet"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)

// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
@@ -58,7 +58,7 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
@@ -162,7 +162,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
@@ -443,7 +443,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}

@@ -31,10 +31,9 @@ import (
"github.com/minio/minio/internal/config"
cfgldap "github.com/minio/minio/internal/config/identity/ldap"
"github.com/minio/minio/internal/config/identity/openid"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/ldap"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/ldap"
"github.com/minio/pkg/v3/policy"
)

func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, isUpdate bool) {
@@ -60,7 +59,7 @@ func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.R
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}

@@ -19,13 +19,17 @@ package cmd

import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"

"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/auth"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
xldap "github.com/minio/pkg/v3/ldap"
"github.com/minio/pkg/v3/policy"
)

// ListLDAPPolicyMappingEntities lists users/groups mapped to given/all policies.
@@ -101,6 +105,12 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.
return
}

// fail if ldap is not enabled
if !globalIAMSys.LDAPConfig.Enabled() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminLDAPNotEnabled), r.URL)
return
}

if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
@@ -128,7 +138,7 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
@@ -175,3 +185,297 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.
|
||||
|
||||
writeSuccessResponseJSON(w, encryptedData)
|
||||
}
|
||||
|
||||
// AddServiceAccountLDAP adds a new service account for provided LDAP username or DN
|
||||
//
|
||||
// PUT /minio/admin/v3/idp/ldap/add-service-account
|
||||
func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r)
|
||||
if APIError.Code != "" {
|
||||
writeErrorResponseJSON(ctx, w, APIError, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// fail if ldap is not enabled
|
||||
if !globalIAMSys.LDAPConfig.Enabled() {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminLDAPNotEnabled), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Find the user for the request sender (as it may be sent via a service
|
||||
// account or STS account):
|
||||
requestorUser := cred.AccessKey
|
||||
requestorParentUser := cred.AccessKey
|
||||
requestorGroups := cred.Groups
|
||||
requestorIsDerivedCredential := false
|
||||
if cred.IsServiceAccount() || cred.IsTemp() {
|
||||
requestorParentUser = cred.ParentUser
|
||||
requestorIsDerivedCredential = true
|
||||
}
|
||||
|
||||
// Check if we are creating svc account for request sender.
|
||||
isSvcAccForRequestor := false
|
||||
if targetUser == requestorUser || targetUser == requestorParentUser {
|
||||
isSvcAccForRequestor = true
|
||||
}
|
||||
|
||||
var (
|
||||
targetGroups []string
|
||||
err error
|
||||
)
|
||||
|
||||
// If we are creating svc account for request sender, ensure that targetUser
|
||||
// is a real user (i.e. not derived credentials).
|
||||
if isSvcAccForRequestor {
|
||||
if requestorIsDerivedCredential {
|
||||
if requestorParentUser == "" {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx,
|
||||
errors.New("service accounts cannot be generated for temporary credentials without parent")), r.URL)
|
||||
return
|
||||
}
|
||||
targetUser = requestorParentUser
|
||||
}
|
||||
targetGroups = requestorGroups
|
||||
|
||||
// Deny if the target user is not LDAP
|
||||
foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if foundResult == nil {
|
||||
err := errors.New("Specified user does not exist on LDAP server")
|
||||
APIErr := errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err)
|
||||
writeErrorResponseJSON(ctx, w, APIErr, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// In case of LDAP/OIDC we need to set `opts.claims` to ensure
|
||||
// it is associated with the LDAP/OIDC user properly.
|
||||
for k, v := range cred.Claims {
|
||||
if k == expClaim {
|
||||
continue
|
||||
}
|
||||
opts.claims[k] = v
|
||||
}
|
||||
} else {
|
||||
// We still need to ensure that the target user is a valid LDAP user.
|
||||
//
|
||||
// The target user may be supplied as a (short) username or a DN.
|
||||
// However, for now, we only support using the short username.
|
||||
|
||||
isDN := globalIAMSys.LDAPConfig.ParsesAsDN(targetUser)
|
||||
opts.claims[ldapUserN] = targetUser // simple username
|
||||
var lookupResult *xldap.DNSearchResult
|
||||
lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
|
||||
if err != nil {
|
||||
// if not found, check if DN
|
||||
if strings.Contains(err.Error(), "User DN not found for:") {
|
||||
if isDN {
|
||||
// warn user that DNs are not allowed
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminLDAPExpectedLoginName, err), r.URL)
|
||||
} else {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err), r.URL)
|
||||
}
|
||||
}
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
targetUser = lookupResult.NormDN
opts.claims[ldapUser] = targetUser // DN
opts.claims[ldapActualUser] = lookupResult.ActualDN

// Check if this user or their groups have a policy applied.
ldapPolicies, err := globalIAMSys.PolicyDBGet(targetUser, targetGroups...)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if len(ldapPolicies) == 0 {
err = fmt.Errorf("No policy set for user `%s` or any of their groups: `%s`", opts.claims[ldapActualUser], strings.Join(targetGroups, "`,`"))
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err), r.URL)
return
}

// Add LDAP attributes that were looked up into the claims.
for attribKey, attribValue := range lookupResult.Attributes {
opts.claims[ldapAttribPrefix+attribKey] = attribValue
}
}

newCred, updatedAt, err := globalIAMSys.NewServiceAccount(ctx, targetUser, targetGroups, opts)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

createResp := madmin.AddServiceAccountResp{
Credentials: madmin.Credentials{
AccessKey: newCred.AccessKey,
SecretKey: newCred.SecretKey,
Expiration: newCred.Expiration,
},
}

data, err := json.Marshal(createResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

writeSuccessResponseJSON(w, encryptedData)

// Call hook for cluster-replication if the service account is not for a
// root user.
if newCred.ParentUser != globalActiveCred.AccessKey {
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSvcAcc,
SvcAccChange: &madmin.SRSvcAccChange{
Create: &madmin.SRSvcAccCreate{
Parent: newCred.ParentUser,
AccessKey: newCred.AccessKey,
SecretKey: newCred.SecretKey,
Groups: newCred.Groups,
Name: newCred.Name,
Description: newCred.Description,
Claims: opts.claims,
SessionPolicy: createReq.Policy,
Status: auth.AccountOn,
Expiration: createReq.Expiration,
},
},
UpdatedAt: updatedAt,
}))
}
}
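
The claim-copy loop above deliberately skips the expiry claim so the parent credential's expiry does not leak into the new service account. A minimal illustrative sketch of that pattern, not part of this changeset and using hypothetical names:

// propagateClaims copies requestor claims into a fresh claim map while
// dropping the expiry claim (expClaim in the handler above).
func propagateClaims(src map[string]interface{}, expClaim string) map[string]interface{} {
    dst := make(map[string]interface{}, len(src))
    for k, v := range src {
        if k == expClaim {
            continue // never inherit the parent credential's expiry
        }
        dst[k] = v
    }
    return dst
}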

// ListAccessKeysLDAP - GET /minio/admin/v3/idp/ldap/list-access-keys
func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

cred, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}

userDN := r.Form.Get("userDN")

// If listing is requested for a specific user (who is not the request
// sender), check that the user has permissions.
if userDN != "" && userDN != cred.ParentUser {
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
} else {
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
DenyOnly: true,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
userDN = cred.AccessKey
if cred.ParentUser != "" {
userDN = cred.ParentUser
}
}

dnResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if dnResult == nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errNoSuchUser), r.URL)
return
}
targetAccount := dnResult.NormDN

listType := r.Form.Get("listType")
if listType != "sts-only" && listType != "svcacc-only" && listType != "" {
// default to both
listType = ""
}

var serviceAccounts []auth.Credentials
var stsKeys []auth.Credentials

if listType == "" || listType == "sts-only" {
stsKeys, err = globalIAMSys.ListSTSAccounts(ctx, targetAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
if listType == "" || listType == "svcacc-only" {
serviceAccounts, err = globalIAMSys.ListServiceAccounts(ctx, targetAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}

var serviceAccountList []madmin.ServiceAccountInfo
var stsKeyList []madmin.ServiceAccountInfo

for _, svc := range serviceAccounts {
expiryTime := svc.Expiration
serviceAccountList = append(serviceAccountList, madmin.ServiceAccountInfo{
AccessKey: svc.AccessKey,
Expiration: &expiryTime,
})
}
for _, sts := range stsKeys {
expiryTime := sts.Expiration
stsKeyList = append(stsKeyList, madmin.ServiceAccountInfo{
AccessKey: sts.AccessKey,
Expiration: &expiryTime,
})
}

listResp := madmin.ListAccessKeysLDAPResp{
ServiceAccounts: serviceAccountList,
STSKeys: stsKeyList,
}

data, err := json.Marshal(listResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

writeSuccessResponseJSON(w, encryptedData)
}
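
Note the asymmetry in the permission checks above: listing another user's access keys requires an explicit allow of ListServiceAccountsAdminAction, while listing your own keys succeeds unless that action is explicitly denied (DenyOnly: true). A minimal sketch of that rule, not part of this changeset, with a hypothetical isAllowed callback standing in for globalIAMSys.IsAllowed:

// canListAccessKeys reports whether the requestor may list keys for target.
// isAllowed(denyOnly) abstracts the policy engine call shown above.
func canListAccessKeys(target, self string, isAllowed func(denyOnly bool) bool) bool {
    if target != "" && target != self {
        // Someone else's keys: an explicit allow is required.
        return isAllowed(false)
    }
    // Your own keys: implicitly allowed unless explicitly denied.
    return isAllowed(true)
}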

@@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@@ -18,15 +18,17 @@
package cmd

import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"strconv"
"strings"

"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/policy"
)

var (
@@ -66,16 +68,28 @@ func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Reque

vars := mux.Vars(r)
v := vars["pool"]
byID := vars["by-id"] == "true"

pools := strings.Split(v, ",")
poolIndices := make([]int, 0, len(pools))

for _, pool := range pools {
idx := globalEndpoints.GetPoolIdx(pool)
if idx == -1 {
// We didn't find any matching pools, invalid input
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
return
var idx int
if byID {
var err error
idx, err = strconv.Atoi(pool)
if err != nil {
// We didn't find any matching pools, invalid input
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
return
}
} else {
idx = globalEndpoints.GetPoolIdx(pool)
if idx == -1 {
// We didn't find any matching pools, invalid input
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
return
}
}
var pool *erasureSets
for pidx := range z.serverPools {
@@ -93,21 +107,12 @@ func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Reque
poolIndices = append(poolIndices, idx)
}

if len(poolIndices) > 0 && !globalEndpoints[poolIndices[0]].Endpoints[0].IsLocal {
ep := globalEndpoints[poolIndices[0]].Endpoints[0]
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == ep.Host {
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
return
}
}
if len(poolIndices) == 0 || !proxyDecommissionRequest(ctx, globalEndpoints[poolIndices[0]].Endpoints[0], w, r) {
if err := z.Decommission(r.Context(), poolIndices...); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}

if err := z.Decommission(r.Context(), poolIndices...); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
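
The hunk above makes StartDecommission accept pools either by name or, when by-id=true, by numeric index. A minimal self-contained sketch of that resolution step, not part of this changeset, with a hypothetical lookup callback in place of globalEndpoints.GetPoolIdx:

import (
    "fmt"
    "strconv"
)

// resolvePoolIdx resolves a pool argument to an index: parsed as a
// numeric id when byID is set, otherwise via endpoint lookup.
func resolvePoolIdx(arg string, byID bool, lookup func(string) int) (int, error) {
    if byID {
        idx, err := strconv.Atoi(arg)
        if err != nil {
            return -1, fmt.Errorf("invalid pool id %q: %w", arg, err)
        }
        return idx, nil
    }
    if idx := lookup(arg); idx != -1 {
        return idx, nil
    }
    return -1, fmt.Errorf("pool %q not found", arg)
}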

func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Request) {
@@ -132,28 +137,29 @@ func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Requ

vars := mux.Vars(r)
v := vars["pool"]
byID := vars["by-id"] == "true"
idx := -1

if byID {
if i, err := strconv.Atoi(v); err == nil && i >= 0 && i < len(globalEndpoints) {
idx = i
}
} else {
idx = globalEndpoints.GetPoolIdx(v)
}

idx := globalEndpoints.GetPoolIdx(v)
if idx == -1 {
// We didn't find any matching pools, invalid input
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
return
}

if ep := globalEndpoints[idx].Endpoints[0]; !ep.IsLocal {
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == ep.Host {
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
return
}
}
if !proxyDecommissionRequest(ctx, globalEndpoints[idx].Endpoints[0], w, r) {
if err := pools.DecommissionCancel(ctx, idx); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}

if err := pools.DecommissionCancel(ctx, idx); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}

func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
@@ -178,8 +184,17 @@ func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {

vars := mux.Vars(r)
v := vars["pool"]
byID := vars["by-id"] == "true"
idx := -1

if byID {
if i, err := strconv.Atoi(v); err == nil && i >= 0 && i < len(globalEndpoints) {
idx = i
}
} else {
idx = globalEndpoints.GetPoolIdx(v)
}

idx := globalEndpoints.GetPoolIdx(v)
if idx == -1 {
apiErr := toAdminAPIErr(ctx, errInvalidArgument)
apiErr.Description = fmt.Sprintf("specified pool '%s' not found, please specify a valid pool", v)
@@ -194,7 +209,7 @@ func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
return
}

logger.LogIf(r.Context(), json.NewEncoder(w).Encode(&status))
adminLogIf(r.Context(), json.NewEncoder(w).Encode(&status))
}

func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
@@ -227,7 +242,7 @@ func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
poolsStatus[idx] = status
}

logger.LogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus))
adminLogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus))
}

func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request) {
@@ -334,11 +349,11 @@ func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceNotStarted), r.URL)
return
}
logger.LogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err))
adminLogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
logger.LogIf(r.Context(), json.NewEncoder(w).Encode(rs))
adminLogIf(r.Context(), json.NewEncoder(w).Encode(rs))
}

func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request) {
@@ -358,5 +373,20 @@ func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request)
// Cancel any ongoing rebalance operation
globalNotificationSys.StopRebalance(r.Context())
writeSuccessResponseHeadersOnly(w)
logger.LogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
adminLogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
}

func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w http.ResponseWriter, r *http.Request) (proxy bool) {
host := env.Get("_MINIO_DECOM_ENDPOINT_HOST", defaultEndPoint.Host)
if host == "" {
return
}
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == host && !proxyEp.IsLocal {
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
return true
}
}
}
return
}
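
proxyDecommissionRequest above lets the _MINIO_DECOM_ENDPOINT_HOST environment variable override the default endpoint host before deciding whether to proxy. A minimal sketch of that override, not part of this changeset, using os.Getenv in place of the pkg/v3/env helper:

import "os"

// decomHost returns the host that decommission requests should target:
// the env override when set, otherwise the default endpoint host.
func decomHost(defaultHost string) string {
    if h := os.Getenv("_MINIO_DECOM_ENDPOINT_HOST"); h != "" {
        return h
    }
    return defaultHost
}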

@@ -32,9 +32,8 @@ import (
"github.com/dustin/go-humanize"
"github.com/minio/madmin-go/v3"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)

// SiteReplicationAdd - PUT /minio/admin/v3/site-replication/add
@@ -52,9 +51,10 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
return
}

status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites)
opts := getSRAddOptions(r)
status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites, opts)
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -68,6 +68,11 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
writeSuccessResponseJSON(w, body)
}

func getSRAddOptions(r *http.Request) (opts madmin.SRAddOptions) {
opts.ReplicateILMExpiry = r.Form.Get("replicateILMExpiry") == "true"
return
}
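
getSRAddOptions above reads the flag as a literal "true" form value, so a caller opts in by putting replicateILMExpiry=true on the add request. A minimal sketch of building such a request, not part of this changeset (the path comes from the SiteReplicationAdd comment above; the helper name is hypothetical):

import "net/http"

// newSRAddRequest builds a site-replication add request with ILM expiry
// replication enabled via the replicateILMExpiry query parameter.
func newSRAddRequest(host string) (*http.Request, error) {
    u := host + "/minio/admin/v3/site-replication/add?replicateILMExpiry=true"
    return http.NewRequest(http.MethodPut, u, nil)
}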

// SRPeerJoin - PUT /minio/admin/v3/site-replication/join
//
// used internally to tell current cluster to enable SR with
@@ -87,7 +92,7 @@ func (a adminAPIHandlers) SRPeerJoin(w http.ResponseWriter, r *http.Request) {
}

if err := globalSiteReplicationSys.PeerJoinReq(ctx, joinArg); err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -134,7 +139,7 @@ func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request
globalSiteReplicationSys.purgeDeletedBucket(ctx, objectAPI, bucket)
}
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -186,13 +191,13 @@ func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http.
err = globalSiteReplicationSys.PeerGroupInfoChangeHandler(ctx, item.GroupInfo, item.UpdatedAt)
}
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}

// SRPeerReplicateBucketItem - PUT /minio/admin/v3/site-replication/bucket-meta
// SRPeerReplicateBucketItem - PUT /minio/admin/v3/site-replication/peer/bucket-meta
func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

@@ -253,9 +258,11 @@ func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *ht
err = globalSiteReplicationSys.PeerBucketObjectLockConfigHandler(ctx, item.Bucket, item.ObjectLockConfig, item.UpdatedAt)
case madmin.SRBucketMetaTypeSSEConfig:
err = globalSiteReplicationSys.PeerBucketSSEConfigHandler(ctx, item.Bucket, item.SSEConfig, item.UpdatedAt)
case madmin.SRBucketMetaLCConfig:
err = globalSiteReplicationSys.PeerBucketLCConfigHandler(ctx, item.Bucket, item.ExpiryLCConfig, item.UpdatedAt)
}
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -308,7 +315,6 @@ func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptio
if encryptionKey != "" {
data, err = madmin.DecryptData(encryptionKey, bytes.NewReader(data))
if err != nil {
logger.LogIf(ctx, err)
return SRError{
Cause: err,
Code: ErrSiteReplicationInvalidRequest,
@@ -334,12 +340,25 @@ func (a adminAPIHandlers) SiteReplicationStatus(w http.ResponseWriter, r *http.R
opts.Users = true
opts.Policies = true
opts.Groups = true
opts.ILMExpiryRules = true
}
info, err := globalSiteReplicationSys.SiteReplicationStatus(ctx, objectAPI, opts)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Report the ILMExpiryStats only if at least one site has replication of ILM expiry enabled
var replicateILMExpiry bool
for _, site := range info.Sites {
if site.ReplicateILMExpiry {
replicateILMExpiry = true
break
}
}
if !replicateILMExpiry {
// explicitly send nil for ILMExpiryStats
info.ILMExpiryStats = nil
}

if err = json.NewEncoder(w).Encode(info); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -383,9 +402,11 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site)

opts := getSREditOptions(r)
status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site, opts)
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -398,6 +419,12 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req
writeSuccessResponseJSON(w, body)
}

func getSREditOptions(r *http.Request) (opts madmin.SREditOptions) {
opts.DisableILMExpiryReplication = r.Form.Get("disableILMExpiryReplication") == "true"
opts.EnableILMExpiryReplication = r.Form.Get("enableILMExpiryReplication") == "true"
return
}

// SRPeerEdit - PUT /minio/admin/v3/site-replication/peer/edit
//
// used internally to tell current cluster to update endpoint for peer
@@ -416,7 +443,30 @@ func (a adminAPIHandlers) SRPeerEdit(w http.ResponseWriter, r *http.Request) {
}

if err := globalSiteReplicationSys.PeerEditReq(ctx, pi); err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}

// SRStateEdit - PUT /minio/admin/v3/site-replication/state/edit
//
// used internally to tell current cluster to update site replication state
func (a adminAPIHandlers) SRStateEdit(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationOperationAction)
if objectAPI == nil {
return
}

var state madmin.SRStateEditReq
if err := parseJSONBody(ctx, r.Body, &state, ""); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err := globalSiteReplicationSys.PeerStateEditReq(ctx, state); err != nil {
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -428,6 +478,8 @@ func getSRStatusOptions(r *http.Request) (opts madmin.SRStatusOptions) {
opts.Policies = q.Get("policies") == "true"
opts.Groups = q.Get("groups") == "true"
opts.Users = q.Get("users") == "true"
opts.ILMExpiryRules = q.Get("ilm-expiry-rules") == "true"
opts.PeerState = q.Get("peer-state") == "true"
opts.Entity = madmin.GetSREntityType(q.Get("entity"))
opts.EntityValue = q.Get("entityvalue")
opts.ShowDeleted = q.Get("showDeleted") == "true"
@@ -451,7 +503,7 @@ func (a adminAPIHandlers) SiteReplicationRemove(w http.ResponseWriter, r *http.R
}
status, err := globalSiteReplicationSys.RemovePeerCluster(ctx, objectAPI, rreq)
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -482,7 +534,7 @@ func (a adminAPIHandlers) SRPeerRemove(w http.ResponseWriter, r *http.Request) {
}

if err := globalSiteReplicationSys.InternalRemoveReq(ctx, objectAPI, req); err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -544,7 +596,7 @@ func (a adminAPIHandlers) SiteReplicationDevNull(w http.ResponseWriter, r *http.
// If there is a disconnection before globalNetPerfMinDuration (we give a margin of error of 1 sec)
// would mean the network is not stable. Logging here will help in debugging network issues.
if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
}
}
if err != nil {
@@ -567,5 +619,5 @@ func (a adminAPIHandlers) SiteReplicationNetPerf(w http.ResponseWriter, r *http.
duration = globalNetPerfMinDuration
}
result := siteNetperf(r.Context(), duration)
logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result))
adminLogIf(r.Context(), gob.NewEncoder(w).Encode(result))
}

@@ -32,7 +32,7 @@ import (

"github.com/minio/madmin-go/v3"
minio "github.com/minio/minio-go/v7"
"github.com/minio/pkg/v2/sync/errgroup"
"github.com/minio/pkg/v3/sync/errgroup"
)

func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) {

@@ -27,15 +27,19 @@ import (
"net/http"
"os"
"sort"
"strconv"
"time"
"unicode/utf8"

"github.com/klauspost/compress/zip"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/cachevalue"
"github.com/minio/minio/internal/config/dns"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
xldap "github.com/minio/pkg/v3/ldap"
"github.com/minio/pkg/v3/policy"
"github.com/puzpuzpuz/xsync/v3"
)

// RemoveUser - DELETE /minio/admin/v3/remove-user?accessKey=<access_key>
@@ -72,7 +76,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
return
}

logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemIAMUser,
IAMUser: &madmin.SRIAMUser{
AccessKey: accessKey,
@@ -269,14 +273,21 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
return
}
}
updatedAt, err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members)

if globalIAMSys.LDAPConfig.Enabled() {
// We don't allow internal group manipulation in this API when LDAP
// is enabled for now.
err = errIAMActionNotAllowed
} else {
updatedAt, err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members)
}
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemGroupInfo,
GroupInfo: &madmin.SRGroupInfo{
UpdateReq: updReq,
@@ -366,7 +377,7 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
return
}

logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemGroupInfo,
GroupInfo: &madmin.SRGroupInfo{
UpdateReq: madmin.GroupAddRemove{
@@ -404,7 +415,7 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
return
}

logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemIAMUser,
IAMUser: &madmin.SRIAMUser{
AccessKey: accessKey,
@@ -464,6 +475,11 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
return
}

if !utf8.ValidString(accessKey) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserValidUTF), r.URL)
return
}

checkDenyOnly := false
if accessKey == cred.AccessKey {
// Check that there is no explicit deny - otherwise it's allowed
@@ -493,25 +509,31 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
password := cred.SecretKey
configBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}

var ureq madmin.AddOrUpdateUserReq
if err = json.Unmarshal(configBytes, &ureq); err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}

// We don't allow internal user creation with LDAP enabled for now.
if globalIAMSys.LDAPConfig.Enabled() {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errIAMActionNotAllowed), r.URL)
return
}

updatedAt, err := globalIAMSys.CreateUser(ctx, accessKey, ureq)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemIAMUser,
IAMUser: &madmin.SRIAMUser{
AccessKey: accessKey,
@@ -545,14 +567,16 @@ func (a adminAPIHandlers) TemporaryAccountInfo(w http.ResponseWriter, r *http.Re
return
}

if !globalIAMSys.IsAllowed(policy.Args{
args := policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListTemporaryAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
}) {
}

if !globalIAMSys.IsAllowed(args) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
@@ -568,11 +592,16 @@ func (a adminAPIHandlers) TemporaryAccountInfo(w http.ResponseWriter, r *http.Re
if sessionPolicy != nil {
stsAccountPolicy = *sessionPolicy
} else {
policiesNames, err := globalIAMSys.PolicyDBGet(stsAccount.ParentUser, false)
policiesNames, err := globalIAMSys.PolicyDBGet(stsAccount.ParentUser, stsAccount.Groups...)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if len(policiesNames) == 0 {
policySet, _ := args.GetPolicies(iamPolicyClaimNameOpenID())
policiesNames = policySet.ToSlice()
}

stsAccountPolicy = globalIAMSys.GetCombinedPolicy(policiesNames...)
}

@@ -607,72 +636,22 @@ func (a adminAPIHandlers) TemporaryAccountInfo(w http.ResponseWriter, r *http.Re

// AddServiceAccount - PUT /minio/admin/v3/add-service-account
func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r)
if APIError.Code != "" {
writeErrorResponseJSON(ctx, w, APIError, r.URL)
return
}

cred, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}

password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}

var createReq madmin.AddServiceAccountReq
if err = json.Unmarshal(reqBytes, &createReq); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}

// service account access key cannot have space characters beginning and end of the string.
if hasSpaceBE(createReq.AccessKey) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL)
return
}

if err := createReq.Validate(); err != nil {
// Since this validation would happen client side as well, we only send
// a generic error message here.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL)
if createReq.AccessKey == globalActiveCred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
return
}

var (
targetUser string
targetGroups []string
err error
)

// If the request did not set a TargetUser, the service account is
// created for the request sender.
targetUser = createReq.TargetUser
if targetUser == "" {
targetUser = cred.AccessKey
}

description := createReq.Description
if description == "" {
description = createReq.Comment
}
opts := newServiceAccountOpts{
accessKey: createReq.AccessKey,
secretKey: createReq.SecretKey,
name: createReq.Name,
description: description,
expiration: createReq.Expiration,
claims: make(map[string]interface{}),
}

// Find the user for the request sender (as it may be sent via a service
// account or STS account):
requestorUser := cred.AccessKey
@@ -706,23 +685,6 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
// that targetUser is a real user (i.e. not derived
// credentials).
if isSvcAccForRequestor {
// Check if adding service account is explicitly denied.
//
// This allows turning off service accounts for request sender,
// if there is no deny statement this call is implicitly enabled.
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: requestorUser,
Groups: requestorGroups,
Action: policy.CreateServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
DenyOnly: true,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}

if requestorIsDerivedCredential {
if requestorParentUser == "" {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx,
@@ -741,31 +703,23 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
}
opts.claims[k] = v
}
} else {
// Need permission if we are creating a service account for a
// user other than the request sender
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: requestorUser,
Groups: requestorGroups,
Action: policy.CreateServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}

} else if globalIAMSys.LDAPConfig.Enabled() {
// In case of LDAP we need to resolve the targetUser to a DN and
// query their groups:
if globalIAMSys.LDAPConfig.Enabled() {
opts.claims[ldapUserN] = targetUser // simple username
targetUser, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
opts.claims[ldapUser] = targetUser // username DN
opts.claims[ldapUserN] = targetUser // simple username
var lookupResult *xldap.DNSearchResult
lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
targetUser = lookupResult.NormDN
opts.claims[ldapUser] = targetUser // username DN
opts.claims[ldapActualUser] = lookupResult.ActualDN

// Add LDAP attributes that were looked up into the claims.
for attribKey, attribValue := range lookupResult.Attributes {
opts.claims[ldapAttribPrefix+attribKey] = attribValue
}

// NOTE: if not using LDAP, then internal IDP or open ID is
@@ -774,19 +728,6 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
// latter, a group notion is not supported.
}

var sp *policy.Policy
if len(createReq.Policy) > 0 {
sp, err = policy.ParseConfig(bytes.NewReader(createReq.Policy))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if sp.Version == "" && len(sp.Statements) == 0 {
sp = nil
}
}

opts.sessionPolicy = sp
newCred, updatedAt, err := globalIAMSys.NewServiceAccount(ctx, targetUser, targetGroups, opts)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -807,7 +748,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
return
}

encryptedData, err := madmin.EncryptData(password, data)
encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -818,7 +759,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
// Call hook for cluster-replication if the service account is not for a
// root user.
if newCred.ParentUser != globalActiveCred.AccessKey {
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSvcAcc,
SvcAccChange: &madmin.SRSvcAccChange{
Create: &madmin.SRSvcAccCreate{
@@ -868,25 +809,6 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
return
}

if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.UpdateServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
}) {
requestUser := cred.AccessKey
if cred.ParentUser != "" {
requestUser = cred.ParentUser
}

if requestUser != svcAccount.ParentUser {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}

password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
@@ -907,6 +829,35 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
return
}

condValues := getConditionValues(r, "", cred)
err = addExpirationToCondValues(updateReq.NewExpiration, condValues)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

// Permission checks:
//
// 1. Any type of account (i.e. access keys (previously/still called service
// accounts), STS accounts, internal IDP accounts, etc) with the
// policy.UpdateServiceAccountAdminAction permission can update any service
// account.
//
// 2. We would like to let a user update their own access keys, however it
// is currently blocked pending a re-design. Users are still able to delete
// and re-create them.
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.UpdateServiceAccountAdminAction,
ConditionValues: condValues,
IsOwner: owner,
Claims: cred.Claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}

var sp *policy.Policy
if len(updateReq.NewPolicy) > 0 {
sp, err = policy.ParseConfig(bytes.NewReader(updateReq.NewPolicy))
@@ -934,7 +885,7 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re

// Call site replication hook - non-root user accounts are replicated.
if svcAccount.ParentUser != globalActiveCred.AccessKey {
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSvcAcc,
SvcAccChange: &madmin.SRSvcAccChange{
Update: &madmin.SRSvcAccUpdate{
@@ -1010,7 +961,7 @@ func (a adminAPIHandlers) InfoServiceAccount(w http.ResponseWriter, r *http.Requ
if !impliedPolicy {
svcAccountPolicy = *sessionPolicy
} else {
policiesNames, err := globalIAMSys.PolicyDBGet(svcAccount.ParentUser, false)
policiesNames, err := globalIAMSys.PolicyDBGet(svcAccount.ParentUser, svcAccount.Groups...)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -1107,8 +1058,13 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
for _, svc := range serviceAccounts {
expiryTime := svc.Expiration
serviceAccountList = append(serviceAccountList, madmin.ServiceAccountInfo{
AccessKey: svc.AccessKey,
Expiration: &expiryTime,
Description: svc.Description,
ParentUser: svc.ParentUser,
Name: svc.Name,
AccountStatus: svc.Status,
AccessKey: svc.AccessKey,
ImpliedPolicy: svc.IsImpliedPolicy(),
Expiration: &expiryTime,
})
}

@@ -1154,6 +1110,10 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
return
}

if serviceAccount == siteReplicatorSvcAcc && globalSiteReplicationSys.isEnabled() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidArgument), r.URL)
return
}
// We do not care if service account is readable or not at this point,
// since this is a delete call we shall allow it to be deleted if possible.
svcAccount, _, err := globalIAMSys.GetServiceAccount(ctx, serviceAccount)
@@ -1192,7 +1152,7 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re

// Call site replication hook - non-root user accounts are replicated.
if svcAccount.ParentUser != "" && svcAccount.ParentUser != globalActiveCred.AccessKey {
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSvcAcc,
SvcAccChange: &madmin.SRSvcAccChange{
Delete: &madmin.SRSvcAccDelete{
@@ -1275,26 +1235,17 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
return rd, wr
}

bucketStorageCache.Once.Do(func() {
// Set this to 10 secs since its enough, as scanner
// does not update the bucket usage values frequently.
bucketStorageCache.TTL = 10 * time.Second

// Rely on older value if usage loading fails from disk.
bucketStorageCache.Relax = true
bucketStorageCache.Update = func() (interface{}, error) {
ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
bucketStorageCache.InitOnce(10*time.Second,
cachevalue.Opts{ReturnLastGood: true},
func(ctx context.Context) (DataUsageInfo, error) {
ctx, done := context.WithTimeout(ctx, 2*time.Second)
defer done()

return loadDataUsageFromBackend(ctx, objectAPI)
}
})
},
)

var dataUsageInfo DataUsageInfo
v, _ := bucketStorageCache.Get()
if v != nil {
dataUsageInfo, _ = v.(DataUsageInfo)
}
dataUsageInfo, _ := bucketStorageCache.Get()

// If etcd, dns federation configured list buckets from etcd.
var err error
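
The hunk above replaces the hand-rolled Once/TTL/Update fields with the internal cachevalue API: InitOnce wires the TTL, options, and a typed loader in one call, and ReturnLastGood keeps serving the previous value when a reload fails. A minimal sketch of that pattern, not part of this changeset and assuming the Cache type and signatures as used in this diff:

// statsCache caches data-usage lookups for 10s, falling back to the
// last good value when the loader errors.
var statsCache cachevalue.Cache[DataUsageInfo]

func initStatsCache(objectAPI ObjectLayer) {
    statsCache.InitOnce(10*time.Second,
        cachevalue.Opts{ReturnLastGood: true},
        func(ctx context.Context) (DataUsageInfo, error) {
            ctx, done := context.WithTimeout(ctx, 2*time.Second)
            defer done()
            return loadDataUsageFromBackend(ctx, objectAPI)
        },
    )
}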
@@ -1357,9 +1308,9 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
effectivePolicy = globalIAMSys.GetCombinedPolicy(policySetFromClaims.ToSlice()...)

default:
policies, err := globalIAMSys.PolicyDBGet(accountName, false, cred.Groups...)
policies, err := globalIAMSys.PolicyDBGet(accountName, cred.Groups...)
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -1511,7 +1462,7 @@ func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Requ
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
continue
}
newPolicies[name] = p
@@ -1541,7 +1492,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
continue
}
newPolicies[name] = p
@@ -1571,7 +1522,7 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ

// Call cluster-replication policy creation hook to replicate policy deletion to
// other minio clusters.
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicy,
Name: policyName,
UpdatedAt: UTCNow(),
@@ -1634,7 +1585,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request

// Call cluster-replication policy creation hook to replicate policy to
// other minio clusters.
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicy,
Name: policyName,
Policy: iamPolicyBytes,
@@ -1642,7 +1593,12 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
}))
}

// SetPolicyForUserOrGroup - PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
// SetPolicyForUserOrGroup - sets a policy on a user or a group.
//
// PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
//
// Deprecated: This API is replaced by attach/detach policy APIs for specific
// type of users (builtin or LDAP).
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

@@ -1694,6 +1650,32 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
userType := regUser
if globalIAMSys.GetUsersSysType() == LDAPUsersSysType {
userType = stsUser

// Validate that the user or group exists in LDAP and use the normalized
// form of the entityName (which will be an LDAP DN).
var err error
if isGroup {
var foundGroupDN *xldap.DNSearchResult
var underBaseDN bool
if foundGroupDN, underBaseDN, err = globalIAMSys.LDAPConfig.GetValidatedGroupDN(nil, entityName); err != nil {
iamLogIf(ctx, err)
} else if foundGroupDN == nil || !underBaseDN {
err = errNoSuchGroup
}
entityName = foundGroupDN.NormDN
} else {
var foundUserDN *xldap.DNSearchResult
if foundUserDN, err = globalIAMSys.LDAPConfig.GetValidatedDNForUsername(entityName); err != nil {
iamLogIf(ctx, err)
} else if foundUserDN == nil {
err = errNoSuchUser
}
entityName = foundUserDN.NormDN
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}

updatedAt, err := globalIAMSys.PolicyDBSet(ctx, entityName, policyName, userType, isGroup)
@@ -1702,7 +1684,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
return
}

logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicyMapping,
PolicyMapping: &madmin.SRPolicyMapping{
UserOrGroup: entityName,
@@ -1842,17 +1824,27 @@ func (a adminAPIHandlers) AttachDetachPolicyBuiltin(w http.ResponseWriter, r *ht
}

const (
allPoliciesFile = "policies.json"
allUsersFile = "users.json"
allGroupsFile = "groups.json"
allSvcAcctsFile = "svcaccts.json"
userPolicyMappingsFile = "user_mappings.json"
groupPolicyMappingsFile = "group_mappings.json"
stsUserPolicyMappingsFile = "stsuser_mappings.json"
stsGroupPolicyMappingsFile = "stsgroup_mappings.json"
iamAssetsDir = "iam-assets"
allPoliciesFile = "policies.json"
allUsersFile = "users.json"
allGroupsFile = "groups.json"
allSvcAcctsFile = "svcaccts.json"
userPolicyMappingsFile = "user_mappings.json"
groupPolicyMappingsFile = "group_mappings.json"
stsUserPolicyMappingsFile = "stsuser_mappings.json"

iamAssetsDir = "iam-assets"
)

var iamExportFiles = []string{
allPoliciesFile,
allUsersFile,
allGroupsFile,
allSvcAcctsFile,
userPolicyMappingsFile,
groupPolicyMappingsFile,
stsUserPolicyMappingsFile,
}
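
iamExportFiles above fixes the set of JSON documents that ExportIAM writes under the iam-assets/ directory of the produced zip. A minimal sketch of inspecting such an export with the standard library, not part of this changeset:

import (
    "archive/zip"
    "fmt"
)

// listIAMExport prints the entries of an IAM export zip, e.g.
// iam-assets/policies.json, iam-assets/users.json, ...
func listIAMExport(path string) error {
    zr, err := zip.OpenReader(path)
    if err != nil {
        return err
    }
    defer zr.Close()
    for _, f := range zr.File {
        fmt.Println(f.Name)
    }
    return nil
}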

// ExportIAMHandler - exports all iam info as a zipped file
func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -1876,38 +1868,28 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
sys: nil,
})
if zerr != nil {
logger.LogIf(ctx, zerr)
adminLogIf(ctx, zerr)
return nil
}
header.Method = zip.Deflate
zwriter, zerr := zipWriter.CreateHeader(header)
if zerr != nil {
logger.LogIf(ctx, zerr)
adminLogIf(ctx, zerr)
return nil
}
if _, err := io.Copy(zwriter, r); err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
}
return nil
}

iamFiles := []string{
allPoliciesFile,
allUsersFile,
allGroupsFile,
allSvcAcctsFile,
userPolicyMappingsFile,
groupPolicyMappingsFile,
stsUserPolicyMappingsFile,
stsGroupPolicyMappingsFile,
}
for _, f := range iamFiles {
for _, f := range iamExportFiles {
iamFile := pathJoin(iamAssetsDir, f)
switch f {
case allPoliciesFile:
allPolicies, err := globalIAMSys.ListPolicies(ctx, "")
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
@@ -1986,7 +1968,7 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
_, policy, err := globalIAMSys.GetServiceAccount(ctx, acc.Credentials.AccessKey)
sa, policy, err := globalIAMSys.GetServiceAccount(ctx, acc.Credentials.AccessKey)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
@@ -2008,6 +1990,9 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
Claims: claims,
SessionPolicy: json.RawMessage(policyJSON),
Status: acc.Credentials.Status,
Name: sa.Name,
Description: sa.Description,
Expiration: &sa.Expiration,
}
}

@@ -2022,13 +2007,13 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
return
}
case userPolicyMappingsFile:
userPolicyMap := make(map[string]MappedPolicy)
userPolicyMap := xsync.NewMapOf[string, MappedPolicy]()
err := globalIAMSys.store.loadMappedPolicies(ctx, regUser, false, userPolicyMap)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
userPolData, err := json.Marshal(userPolicyMap)
userPolData, err := json.Marshal(mappedPoliciesToMap(userPolicyMap))
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
@@ -2039,13 +2024,13 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
return
}
case groupPolicyMappingsFile:
groupPolicyMap := make(map[string]MappedPolicy)
groupPolicyMap := xsync.NewMapOf[string, MappedPolicy]()
err := globalIAMSys.store.loadMappedPolicies(ctx, regUser, true, groupPolicyMap)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
grpPolData, err := json.Marshal(groupPolicyMap)
grpPolData, err := json.Marshal(mappedPoliciesToMap(groupPolicyMap))
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
@@ -2056,13 +2041,13 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
return
}
case stsUserPolicyMappingsFile:
userPolicyMap := make(map[string]MappedPolicy)
userPolicyMap := xsync.NewMapOf[string, MappedPolicy]()
err := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, false, userPolicyMap)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
userPolData, err := json.Marshal(userPolicyMap)
userPolData, err := json.Marshal(mappedPoliciesToMap(userPolicyMap))
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
@@ -2071,22 +2056,6 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
case stsGroupPolicyMappingsFile:
groupPolicyMap := make(map[string]MappedPolicy)
err := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, true, groupPolicyMap)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
grpPolData, err := json.Marshal(groupPolicyMap)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(grpPolData), iamFile, len(grpPolData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
}
}
}
@@ -2256,12 +2225,12 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
// If group does not exist, then check if the group has beginning and end space characters
// we will reject such group names.
if errors.Is(gerr, errNoSuchGroup) && hasSpaceBE(group) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminResourceInvalidArgument, err, allGroupsFile, group), r.URL)
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminResourceInvalidArgument, gerr, allGroupsFile, group), r.URL)
return
}
}
if _, gerr := globalIAMSys.AddUsersToGroup(ctx, group, grpInfo.Members); gerr != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allGroupsFile, group), r.URL)
writeErrorResponseJSON(ctx, w, importError(ctx, gerr, allGroupsFile, group), r.URL)
return
}
}
@@ -2288,6 +2257,16 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, allSvcAcctsFile, ""), r.URL)
return
}

// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
err := globalIAMSys.NormalizeLDAPAccessKeypairs(ctx, serviceAcctReqs)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, ""), r.URL)
return
}
}

for user, svcAcctReq := range serviceAcctReqs {
var sp *policy.Policy
var err error
@@ -2298,7 +2277,8 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
return
}
}
// service account access key cannot have space characters beginning and end of the string.
// service account access key cannot have space characters
// beginning and end of the string.
if hasSpaceBE(svcAcctReq.AccessKey) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL)
return
@@ -2324,20 +2304,14 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
updateReq = false
}
if updateReq {
opts := updateServiceAccountOpts{
secretKey: svcAcctReq.SecretKey,
status: svcAcctReq.Status,
name: svcAcctReq.Name,
description: svcAcctReq.Description,
expiration: svcAcctReq.Expiration,
sessionPolicy: sp,
}
_, err = globalIAMSys.UpdateServiceAccount(ctx, svcAcctReq.AccessKey, opts)
// If the service account exists, we remove it to ensure a
// clean import.
err := globalIAMSys.DeleteServiceAccount(ctx, svcAcctReq.AccessKey, true)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
delErr := fmt.Errorf("failed to delete existing service account(%s) before importing it: %w", svcAcctReq.AccessKey, err)
writeErrorResponseJSON(ctx, w, importError(ctx, delErr, allSvcAcctsFile, user), r.URL)
return
}
continue
}
opts := newServiceAccountOpts{
accessKey: user,
@@ -2350,18 +2324,6 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
allowSiteReplicatorAccount: false,
}

// In case of LDAP we need to resolve the targetUser to a DN and
// query their groups:
if globalIAMSys.LDAPConfig.Enabled() {
opts.claims[ldapUserN] = svcAcctReq.AccessKey // simple username
targetUser, _, err := globalIAMSys.LDAPConfig.LookupUserDN(svcAcctReq.AccessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
return
}
opts.claims[ldapUser] = targetUser // username DN
}

if _, _, err = globalIAMSys.NewServiceAccount(ctx, svcAcctReq.Parent, svcAcctReq.Groups, opts); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
return
@@ -2430,6 +2392,17 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, groupPolicyMappingsFile, ""), r.URL)
return
}

// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
isGroup := true
err := globalIAMSys.NormalizeLDAPMappingImport(ctx, isGroup, grpPolicyMap)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, ""), r.URL)
return
}
}

for g, pm := range grpPolicyMap {
if _, err := globalIAMSys.PolicyDBSet(ctx, g, pm.Policies, unknownIAMUserType, true); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, g), r.URL)
@@ -2459,6 +2432,16 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, stsUserPolicyMappingsFile, ""), r.URL)
return
}

// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
isGroup := true
err := globalIAMSys.NormalizeLDAPMappingImport(ctx, !isGroup, userPolicyMap)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, ""), r.URL)
return
}
}
for u, pm := range userPolicyMap {
// disallow setting policy mapping if user is a temporary user
ok, _, err := globalIAMSys.IsTempUser(u)
@@ -2470,6 +2453,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importError(ctx, errIAMActionNotAllowed, stsUserPolicyMappingsFile, u), r.URL)
return
}
|
||||
if _, err := globalIAMSys.PolicyDBSet(ctx, u, pm.Policies, stsUser, false); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, u), r.URL)
|
||||
return
|
||||
@@ -2477,33 +2461,110 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// import sts group policy mappings
|
||||
{
|
||||
f, err := zr.Open(pathJoin(iamAssetsDir, stsGroupPolicyMappingsFile))
|
||||
switch {
|
||||
case errors.Is(err, os.ErrNotExist):
|
||||
case err != nil:
|
||||
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, stsGroupPolicyMappingsFile, ""), r.URL)
|
||||
return
|
||||
default:
|
||||
defer f.Close()
|
||||
var grpPolicyMap map[string]MappedPolicy
|
||||
data, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, stsGroupPolicyMappingsFile, ""), r.URL)
|
||||
return
|
||||
}
|
||||
if err = json.Unmarshal(data, &grpPolicyMap); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, stsGroupPolicyMappingsFile, ""), r.URL)
|
||||
return
|
||||
}
|
||||
for g, pm := range grpPolicyMap {
|
||||
if _, err := globalIAMSys.PolicyDBSet(ctx, g, pm.Policies, unknownIAMUserType, true); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, importError(ctx, err, stsGroupPolicyMappingsFile, g), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) error {
|
||||
if exp == nil || exp.IsZero() || exp.Equal(timeSentinel) {
|
||||
return nil
|
||||
}
|
||||
dur := exp.Sub(time.Now())
|
||||
if dur <= 0 {
|
||||
return errors.New("unsupported expiration time")
|
||||
}
|
||||
condValues["DurationSeconds"] = []string{strconv.FormatInt(int64(dur.Seconds()), 10)}
|
||||
return nil
|
||||
}
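The new addExpirationToCondValues helper turns an absolute expiry into a relative "DurationSeconds" condition value that policy conditions can compare numerically. A minimal standalone sketch of the same computation, assuming only the Go standard library (timeSentinel from the MinIO codebase is approximated here by the zero-value check), where time.Until(*exp) is equivalent to the exp.Sub(time.Now()) used above:

package main

import (
	"fmt"
	"strconv"
	"time"
)

// durationSecondsCond mirrors addExpirationToCondValues: a nil or past
// expiration yields no condition value, a future one is expressed as
// whole seconds from now.
func durationSecondsCond(exp *time.Time) (string, bool) {
	if exp == nil || exp.IsZero() {
		return "", false
	}
	dur := time.Until(*exp) // equivalent to exp.Sub(time.Now())
	if dur <= 0 {
		return "", false
	}
	return strconv.FormatInt(int64(dur.Seconds()), 10), true
}

func main() {
	in := time.Now().Add(30 * time.Minute)
	if v, ok := durationSecondsCond(&in); ok {
		fmt.Println("svc:DurationSeconds =", v) // prints roughly 1800
	}
}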

func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials, newServiceAccountOpts, madmin.AddServiceAccountReq, string, APIError) {
ctx := r.Context()

// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErr(ErrServerNotInitialized)
}

cred, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErr(s3Err)
}

password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err)
}

var createReq madmin.AddServiceAccountReq
if err = json.Unmarshal(reqBytes, &createReq); err != nil {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err)
}

if createReq.Expiration != nil && !createReq.Expiration.IsZero() {
// truncate expiration at the second.
truncateTime := createReq.Expiration.Truncate(time.Second)
createReq.Expiration = &truncateTime
}

// service account access key cannot have space characters beginning and end of the string.
if hasSpaceBE(createReq.AccessKey) {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument)
}

if err := createReq.Validate(); err != nil {
// Since this validation would happen client side as well, we only send
// a generic error message here.
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument)
}
// If the request did not set a TargetUser, the service account is
// created for the request sender.
targetUser := createReq.TargetUser
if targetUser == "" {
targetUser = cred.AccessKey
}

description := createReq.Description
if description == "" {
description = createReq.Comment
}
opts := newServiceAccountOpts{
accessKey: createReq.AccessKey,
secretKey: createReq.SecretKey,
name: createReq.Name,
description: description,
expiration: createReq.Expiration,
claims: make(map[string]interface{}),
}

condValues := getConditionValues(r, "", cred)
err = addExpirationToCondValues(createReq.Expiration, condValues)
if err != nil {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", toAdminAPIErr(ctx, err)
}

// Check if action is allowed if creating access key for another user
// Check if action is explicitly denied if for self
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.CreateServiceAccountAdminAction,
ConditionValues: condValues,
IsOwner: owner,
Claims: cred.Claims,
DenyOnly: (targetUser == cred.AccessKey || targetUser == cred.ParentUser),
}) {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErr(ErrAccessDenied)
}

var sp *policy.Policy
if len(createReq.Policy) > 0 {
sp, err = policy.ParseConfig(bytes.NewReader(createReq.Policy))
if err != nil {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", toAdminAPIErr(ctx, err)
}
}

opts.sessionPolicy = sp

return ctx, cred, opts, createReq, targetUser, APIError{}
}
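The DenyOnly flag in the IsAllowed call above encodes an asymmetry worth spelling out: when the caller creates an access key for itself (or its parent), the request passes unless a policy explicitly denies it, while creating one for another user requires an explicit allow. A hedged sketch of that intent; the three-valued "effect" type here is illustrative and is not the MinIO policy API:

package main

import "fmt"

// effect models the outcome of policy matching for this sketch only.
type effect int

const (
	noMatch effect = iota
	allow
	deny
)

// isAllowed sketches the DenyOnly semantics: self-targeted requests pass
// unless explicitly denied; requests for another user need an explicit allow.
func isAllowed(matched effect, denyOnly bool) bool {
	if denyOnly {
		return matched != deny
	}
	return matched == allow
}

func main() {
	fmt.Println(isAllowed(noMatch, true))  // self target: true, nothing denied it
	fmt.Println(isAllowed(noMatch, false)) // other user: false, needs an allow
}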

@@ -39,7 +39,7 @@ import (
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/signer"
"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)

const (
@@ -206,7 +206,9 @@ func TestIAMInternalIDPServerSuite(t *testing.T) {
suite.TestCannedPolicies(c)
suite.TestGroupAddRemove(c)
suite.TestServiceAccountOpsByAdmin(c)
suite.TestServiceAccountPrivilegeEscalationBug(c)
suite.TestServiceAccountOpsByUser(c)
suite.TestServiceAccountDurationSecondsCondition(c)
suite.TestAddServiceAccountPerms(c)
suite.TearDownSuite(c)
},
@@ -896,14 +898,6 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
// 3. Check S3 access
c.assertSvcAccS3Access(ctx, s, cr, bucket)

// 4. Check that svc account can restrict the policy, and that the
// session policy can be updated.
c.assertSvcAccSessionPolicyUpdate(ctx, s, userAdmClient, accessKey, bucket)

// 4. Check that service account's secret key and account status can be
// updated.
c.assertSvcAccSecretKeyAndStatusUpdate(ctx, s, userAdmClient, accessKey, bucket)

// 5. Check that service account can be deleted.
c.assertSvcAccDeletion(ctx, s, userAdmClient, accessKey, bucket)

@@ -911,6 +905,93 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
c.mustNotCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient)
}

func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()

bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket creat error: %v", err)
}

// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Deny",
"Action": [
"admin:CreateServiceAccount",
"admin:UpdateServiceAccount"
],
"Condition": {"NumericGreaterThan": {"svc:DurationSeconds": "3600"}}
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}

accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}

err = s.adm.SetPolicy(ctx, policy, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}

// Create an madmin client with user creds
userAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: credentials.NewStaticV4(accessKey, secretKey, ""),
Secure: s.secure,
})
if err != nil {
c.Fatalf("Err creating user admin client: %v", err)
}
userAdmClient.SetCustomTransport(s.TestSuiteCommon.client.Transport)

distantExpiration := time.Now().Add(30 * time.Minute)
cr, err := userAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
TargetUser: accessKey,
AccessKey: "svc-accesskey",
SecretKey: "svc-secretkey",
Expiration: &distantExpiration,
})
if err != nil {
c.Fatalf("Unable to create svc acc: %v", err)
}

c.assertSvcAccS3Access(ctx, s, cr, bucket)

closeExpiration := time.Now().Add(2 * time.Hour)
_, err = userAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
TargetUser: accessKey,
AccessKey: "svc-accesskey",
SecretKey: "svc-secretkey",
Expiration: &closeExpiration,
})
if err == nil {
c.Fatalf("Creating a svc acc with distant expiration should fail")
}
}
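The arithmetic the test relies on: a 30-minute expiration becomes roughly 1800 seconds of svc:DurationSeconds, under the policy's 3600 cutoff, while the 2-hour one is roughly 7200 and trips the NumericGreaterThan deny (note the test's local variable names read swapped relative to the durations they hold, but the assertions encode the intended limit). A quick self-contained check of that evaluation:

package main

import (
	"fmt"
	"time"
)

func main() {
	const cutoff = 3600 // svc:DurationSeconds limit from the test policy above
	for _, exp := range []time.Duration{30 * time.Minute, 2 * time.Hour} {
		secs := int64(exp.Seconds())
		denied := secs > cutoff // NumericGreaterThan comparison
		fmt.Printf("expiry %v -> %d seconds, denied: %v\n", exp, secs, denied)
	}
}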

func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
@@ -979,6 +1060,95 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
c.assertSvcAccDeletion(ctx, s, s.adm, accessKey, bucket)
}

func (s *TestSuiteIAM) TestServiceAccountPrivilegeEscalationBug(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()

err := s.client.MakeBucket(ctx, "public", minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket creat error: %v", err)
}

err = s.client.MakeBucket(ctx, "private", minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket creat error: %v", err)
}

pubPolicyBytes := []byte(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": [
"arn:aws:s3:::public",
"arn:aws:s3:::public/*"
]
}
]
}`)

fullS3PolicyBytes := []byte(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": [
"arn:aws:s3:::*"
]
}
]
}
`)

// Create a service account for the root user.
cr, err := s.adm.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
TargetUser: globalActiveCred.AccessKey,
Policy: pubPolicyBytes,
})
if err != nil {
c.Fatalf("admin should be able to create service account for themselves %s", err)
}

svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")

// Check that the service account can access the public bucket.
buckets, err := svcClient.ListBuckets(ctx)
if err != nil {
c.Fatalf("err fetching buckets %s", err)
}
if len(buckets) != 1 || buckets[0].Name != "public" {
c.Fatalf("service account should only have access to public bucket")
}

// Create an madmin client with the service account creds.
svcAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: credentials.NewStaticV4(cr.AccessKey, cr.SecretKey, ""),
Secure: s.secure,
})
if err != nil {
c.Fatalf("Err creating svcacct admin client: %v", err)
}
svcAdmClient.SetCustomTransport(s.TestSuiteCommon.client.Transport)

// Attempt to update the policy on the service account.
err = svcAdmClient.UpdateServiceAccount(ctx, cr.AccessKey,
madmin.UpdateServiceAccountReq{
NewPolicy: fullS3PolicyBytes,
})

if err == nil {
c.Fatalf("service account should not be able to update policy on itself")
} else if !strings.Contains(err.Error(), "Access Denied") {
c.Fatalf("unexpected error: %v", err)
}
}
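The escalation this test guards against: a service account inherits its parent's privileges, so if it could rewrite its own embedded session policy it could swap the restricted public-only policy for full s3:* access. A purely hypothetical sketch of the kind of guard involved; the field and function names below are illustrative only and do not appear in the MinIO sources:

package main

import (
	"errors"
	"fmt"
)

// credential is a hypothetical stand-in for the caller's identity.
type credential struct {
	AccessKey        string
	IsServiceAccount bool
}

var errAccessDenied = errors.New("Access Denied")

// canUpdateServiceAccount sketches the rule the test exercises: a service
// account must not edit its own policy, whatever its parent may do.
func canUpdateServiceAccount(caller credential, target string) error {
	if caller.IsServiceAccount && caller.AccessKey == target {
		return errAccessDenied
	}
	return nil
}

func main() {
	svc := credential{AccessKey: "svc-key", IsServiceAccount: true}
	fmt.Println(canUpdateServiceAccount(svc, "svc-key")) // Access Denied
}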

func (s *TestSuiteIAM) SetUpAccMgmtPlugin(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()

File diff suppressed because it is too large
@@ -168,6 +168,7 @@ func testServiceSignalReceiver(cmd cmdType, t *testing.T) {
func getServiceCmdRequest(cmd cmdType, cred auth.Credentials) (*http.Request, error) {
queryVal := url.Values{}
queryVal.Set("action", string(cmd.toServiceAction()))
queryVal.Set("type", "2")
resource := adminPathPrefix + adminAPIVersionPrefix + "/service?" + queryVal.Encode()
req, err := newTestRequest(http.MethodPost, resource, 0, nil)
if err != nil {
@@ -220,12 +221,18 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)

resp, _ := io.ReadAll(rec.Body)
if rec.Code != http.StatusOK {
resp, _ := io.ReadAll(rec.Body)
t.Errorf("Expected to receive %d status code but received %d. Body (%s)",
http.StatusOK, rec.Code, string(resp))
}

result := &serviceResult{}
if err := json.Unmarshal(resp, result); err != nil {
t.Error(err)
}
_ = result

// Wait until testServiceSignalReceiver() called in a goroutine quits.
wg.Wait()
}
@@ -341,7 +348,7 @@ func TestExtractHealInitParams(t *testing.T) {
}
return v
}
qParmsArr := []url.Values{
qParamsArr := []url.Values{
// Invalid cases
mkParams("", true, true),
mkParams("111", true, true),
@@ -366,9 +373,9 @@ func TestExtractHealInitParams(t *testing.T) {
body := `{"recursive": false, "dryRun": true, "remove": false, "scanMode": 0}`

// Test all combinations!
for pIdx, parms := range qParmsArr {
for pIdx, params := range qParamsArr {
for vIdx, vars := range varsArr {
_, err := extractHealInitParams(vars, parms, bytes.NewReader([]byte(body)))
_, err := extractHealInitParams(vars, params, bytes.NewReader([]byte(body)))
isErrCase := false
if pIdx < 4 || vIdx < 1 {
isErrCase = true

@@ -28,6 +28,7 @@ import (
"time"

"github.com/minio/madmin-go/v3"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
)

@@ -62,8 +63,8 @@ const (
)

var (
errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long")
errHealStopSignalled = fmt.Errorf("heal stop signaled")
errHealIdleTimeout = errors.New("healing results were not consumed for too long")
errHealStopSignalled = errors.New("heal stop signaled")

errFnHealFromAPIErr = func(ctx context.Context, err error) error {
apiErr := toAdminAPIErr(ctx, err)
@@ -132,7 +133,13 @@ func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
func (ahs *allHealState) updateHealStatus(tracker *healingTracker) {
ahs.Lock()
defer ahs.Unlock()
ahs.healStatus[tracker.ID] = *tracker

tracker.mu.RLock()
t := *tracker
t.QueuedBuckets = append(make([]string, 0, len(tracker.QueuedBuckets)), tracker.QueuedBuckets...)
t.HealedBuckets = append(make([]string, 0, len(tracker.HealedBuckets)), tracker.HealedBuckets...)
ahs.healStatus[tracker.ID] = t
tracker.mu.RUnlock()
}
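The rewritten updateHealStatus snapshots the tracker under its read lock and re-appends the bucket slices into fresh allocations, because a plain struct copy still shares slice backing arrays with the original. A minimal sketch of that copy-under-lock pattern, with a simplified tracker type assumed for illustration:

package main

import (
	"fmt"
	"sync"
)

// trackerState holds only the copyable fields, keeping the mutex out of
// the snapshot so no lock is ever copied.
type trackerState struct {
	Queued []string
}

type tracker struct {
	mu sync.RWMutex
	trackerState
}

// snapshot copies the state under RLock and clones the slice so later
// appends or writes on the tracker cannot alias the published copy.
func snapshot(tr *tracker) trackerState {
	tr.mu.RLock()
	defer tr.mu.RUnlock()
	t := tr.trackerState
	t.Queued = append(make([]string, 0, len(tr.Queued)), tr.Queued...)
	return t
}

func main() {
	tr := &tracker{trackerState: trackerState{Queued: []string{"bucket-a"}}}
	snap := snapshot(tr)
	tr.Queued[0] = "mutated"
	fmt.Println(snap.Queued[0]) // still "bucket-a": no shared backing array
}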

// Sort by zone, set and disk index
@@ -322,21 +329,25 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay
// Add heal state and start sequence
ahs.healSeqMap[hpath] = h

// Launch top-level background heal go-routine
go h.healSequenceStart(objAPI)

clientToken := h.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s:%d", h.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
}

if h.clientToken == bgHealingUUID {
// For background heal do nothing, do not spawn an unnecessary goroutine.
} else {
// Launch top-level background heal go-routine
go h.healSequenceStart(objAPI)
}

b, err := json.Marshal(madmin.HealStartSuccess{
ClientToken: clientToken,
ClientAddress: h.clientAddress,
StartTime: h.startTime,
})
if err != nil {
logger.LogIf(h.ctx, err)
bugLogIf(h.ctx, err)
return nil, toAdminAPIErr(h.ctx, err), ""
}
return b, noError, ""
@@ -383,7 +394,7 @@ func (ahs *allHealState) PopHealStatusJSON(hpath string,
if err != nil {
h.currentStatus.Items = nil

logger.LogIf(h.ctx, err)
bugLogIf(h.ctx, err)
return nil, ErrInternalError
}

@@ -444,8 +455,8 @@ type healSequence struct {
// Number of total items healed against item type
healedItemsMap map[madmin.HealItemType]int64

// Number of total items where healing failed against endpoint and drive state
healFailedItemsMap map[string]int64
// Number of total items where healing failed against item type
healFailedItemsMap map[madmin.HealItemType]int64

// The time of the last scan/heal activity
lastHealActivity time.Time
@@ -486,7 +497,7 @@ func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
ctx: ctx,
scannedItemsMap: make(map[madmin.HealItemType]int64),
healedItemsMap: make(map[madmin.HealItemType]int64),
healFailedItemsMap: make(map[string]int64),
healFailedItemsMap: make(map[madmin.HealItemType]int64),
}
}

@@ -530,14 +541,14 @@ func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
return retMap
}

// gethealFailedItemsMap - returns map of all items where heal failed against
// getHealFailedItemsMap - returns map of all items where heal failed against
// drive endpoint and status
func (h *healSequence) gethealFailedItemsMap() map[string]int64 {
func (h *healSequence) getHealFailedItemsMap() map[madmin.HealItemType]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()

// Make a copy before returning the value
retMap := make(map[string]int64, len(h.healFailedItemsMap))
retMap := make(map[madmin.HealItemType]int64, len(h.healFailedItemsMap))
for k, v := range h.healFailedItemsMap {
retMap[k] = v
}
@@ -545,6 +556,30 @@ func (h *healSequence) gethealFailedItemsMap() map[string]int64 {
return retMap
}

func (h *healSequence) countFailed(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()

h.healFailedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}

func (h *healSequence) countScanned(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()

h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}

func (h *healSequence) countHealed(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()

h.healedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}
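The three new counter helpers factor out a lock, increment, refresh-timestamp sequence that used to be inlined at call sites, as the queueHealTask hunk below shows when it collapses four lines into a single countScanned call. A small sketch of the same guarded-counter pattern in isolation:

package main

import (
	"fmt"
	"sync"
	"time"
)

// counters mirrors the helper pattern above: every bump takes the same
// lock and refreshes the last-activity timestamp, so call sites shrink
// to one line.
type counters struct {
	mu       sync.Mutex
	counts   map[string]int64
	lastSeen time.Time
}

func (c *counters) bump(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.counts[key]++
	c.lastSeen = time.Now().UTC()
}

func main() {
	c := &counters{counts: make(map[string]int64)}
	c.bump("object")
	c.bump("object")
	fmt.Println(c.counts["object"], !c.lastSeen.IsZero()) // 2 true
}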

// isQuitting - determines if the heal sequence is quitting (due to an
// external signal)
func (h *healSequence) isQuitting() bool {
@@ -697,16 +732,13 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
task.opts.ScanMode = madmin.HealNormalScan
}

h.mutex.Lock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
h.mutex.Unlock()
h.countScanned(healType)

if source.noWait {
select {
case globalBackgroundHealRoutine.tasks <- task:
if serverDebugLog {
logger.Info("Task in the queue: %#v", task)
fmt.Printf("Task in the queue: %#v\n", task)
}
default:
// task queue is full, no more workers, we shall move on and heal later.
@@ -723,7 +755,7 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
select {
case globalBackgroundHealRoutine.tasks <- task:
if serverDebugLog {
logger.Info("Task in the queue: %#v", task)
fmt.Printf("Task in the queue: %#v\n", task)
}
case <-h.ctx.Done():
return nil
@@ -732,37 +764,21 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
// task queued, now wait for the response.
select {
case res := <-task.respCh:
if res.err == nil {
h.countHealed(healType)
} else {
h.countFailed(healType)
}
if !h.reportProgress {
if errors.Is(res.err, errSkipFile) { // this is only sent usually by nopHeal
return nil
}

h.mutex.Lock()
defer h.mutex.Unlock()

// Progress is not reported in case of background heal processing.
// Instead we increment relevant counter based on the heal result
// for prometheus reporting.
if res.err != nil {
for _, d := range res.result.After.Drives {
// For failed items we report the endpoint and drive state
// This will help users take corrective actions for drives
h.healFailedItemsMap[d.Endpoint+","+d.State]++
}
} else {
// Only object type reported for successful healing
h.healedItemsMap[res.result.Type]++
}

// Report caller of any failure
return res.err
}
res.result.Type = healType
if res.err != nil {
// Only report object error
if healType != madmin.HealItemObject {
return res.err
}
res.result.Detail = res.err.Error()
}
return h.pushHealResultItem(res.result)
@@ -800,7 +816,7 @@ func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error {
func (h *healSequence) traverseAndHeal(objAPI ObjectLayer) {
bucketsOnly := false // Heals buckets and objects also.
h.traverseAndHealDoneCh <- h.healItems(objAPI, bucketsOnly)
close(h.traverseAndHealDoneCh)
xioutil.SafeClose(h.traverseAndHealDoneCh)
}

// healMinioSysMeta - heals all files under a given meta prefix, returns a function
@@ -810,7 +826,7 @@ func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) f
// NOTE: Healing on meta is run regardless
// of any bucket being selected, this is to ensure that
// meta are always upto date and correct.
return objAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string) error {
return objAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string, scanMode madmin.HealScanMode) error {
if h.isQuitting() {
return errHealStopSignalled
}
@@ -867,7 +883,7 @@ func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly

if !h.settings.Recursive {
if h.object != "" {
if err := h.healObject(bucket, h.object, ""); err != nil {
if err := h.healObject(bucket, h.object, "", h.settings.ScanMode); err != nil {
return err
}
}
@@ -882,7 +898,7 @@ func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly
}

// healObject - heal the given object and record result
func (h *healSequence) healObject(bucket, object, versionID string) error {
func (h *healSequence) healObject(bucket, object, versionID string, scanMode madmin.HealScanMode) error {
if h.isQuitting() {
return errHealStopSignalled
}

@@ -19,9 +19,6 @@ package cmd

import (
"net/http"
"reflect"
"runtime"
"strings"

"github.com/klauspost/compress/gzhttp"
"github.com/klauspost/compress/gzip"
@@ -63,20 +60,12 @@ const (
noObjLayerFlag
)

// Has checks if the the given flag is enabled in `h`.
// Has checks if the given flag is enabled in `h`.
func (h hFlag) Has(flag hFlag) bool {
// Use bitwise-AND and check if the result is non-zero.
return h&flag != 0
}

func getHandlerName(f http.HandlerFunc) string {
name := runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
name = strings.TrimPrefix(name, "github.com/minio/minio/cmd.adminAPIHandlers.")
name = strings.TrimSuffix(name, "Handler-fm")
name = strings.TrimSuffix(name, "-fm")
return name
}
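The getHandlerName helper removed here relied on runtime.FuncForPC to recover a method value's fully qualified symbol and then trimmed the package and receiver prefix plus the "-fm" suffix Go appends to method values; the new call site below passes the receiver name instead of hard-coding it. One plausible shape of that two-argument variant, as a runnable sketch (the actual replacement lives in a file this diff does not show):

package main

import (
	"fmt"
	"net/http"
	"reflect"
	"runtime"
	"strings"
)

type handlers struct{}

func (handlers) HealthHandler(w http.ResponseWriter, r *http.Request) {}

// handlerName resolves the function's qualified symbol, strips everything
// up to and including the given receiver name, and drops the "-fm" suffix
// that method values carry.
func handlerName(f http.HandlerFunc, receiver string) string {
	name := runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
	if i := strings.Index(name, receiver+"."); i >= 0 {
		name = name[i+len(receiver)+1:]
	}
	return strings.TrimSuffix(name, "-fm")
}

func main() {
	var h handlers
	fmt.Println(handlerName(h.HealthHandler, "handlers")) // HealthHandler
}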

// adminMiddleware performs some common admin handler functionality for all
// handlers:
//
@@ -86,9 +75,13 @@ func getHandlerName(f http.HandlerFunc) string {
//
// - sets up call to send AuditLog
//
// Note that, while this is a middleware function (i.e. it takes a handler
// function and returns one), due to flags being passed based on required
// conditions, it is done per-"handler function registration" in the router.
// While this is a middleware function (i.e. it takes a handler function and
// returns one), due to flags being passed based on required conditions, it is
// done per-"handler function registration" in the router.
//
// The passed in handler function must be a method of `adminAPIHandlers` for the
// name displayed in logs and trace to be accurate. The name is extracted via
// reflection.
//
// When no flags are passed, gzip compression, http tracing of headers and
// checking of object layer availability are all enabled. Use flags to modify
@@ -100,10 +93,8 @@ func adminMiddleware(f http.HandlerFunc, flags ...hFlag) http.HandlerFunc {
handlerFlags |= flag
}

// Get name of the handler using reflection. NOTE: The passed in handler
// function must be a method of `adminAPIHandlers` for this extraction to
// work as expected.
handlerName := getHandlerName(f)
// Get name of the handler using reflection.
handlerName := getHandlerName(f, "adminAPIHandlers")

var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
// Update request context with `logger.ReqInfo`.
@@ -154,9 +145,16 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
}

for _, adminVersion := range adminVersions {
// Restart and stop MinIO service.
// Restart and stop MinIO service type=2
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/service").HandlerFunc(adminMiddleware(adminAPI.ServiceV2Handler, traceAllFlag)).Queries("action", "{action:.*}", "type", "2")

// Deprecated: Restart and stop MinIO service.
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/service").HandlerFunc(adminMiddleware(adminAPI.ServiceHandler, traceAllFlag)).Queries("action", "{action:.*}")
// Update MinIO servers.

// Update all MinIO servers type=2
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update").HandlerFunc(adminMiddleware(adminAPI.ServerUpdateV2Handler, traceAllFlag)).Queries("updateURL", "{updateURL:.*}", "type", "2")

// Deprecated: Update MinIO servers.
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update").HandlerFunc(adminMiddleware(adminAPI.ServerUpdateHandler, traceAllFlag)).Queries("updateURL", "{updateURL:.*}")

// Info operations
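The v2/deprecated route pairing above depends on registration order: the route carrying the extra "type=2" query matcher is registered first, so matching requests hit the V2 handler and everything else falls through to the deprecated one. A minimal sketch of that behavior, assuming the upstream gorilla/mux package (MinIO uses a fork with the same matcher semantics):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// More specific route first: requires type=2 in the query string.
	r.Methods(http.MethodPost).Path("/service").
		HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, "v2") }).
		Queries("action", "{action:.*}", "type", "2")
	// Fallback route for requests without type=2.
	r.Methods(http.MethodPost).Path("/service").
		HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, "v1") }).
		Queries("action", "{action:.*}")

	for _, target := range []string{"/service?action=restart&type=2", "/service?action=restart"} {
		rec := httptest.NewRecorder()
		r.ServeHTTP(rec, httptest.NewRequest(http.MethodPost, target, nil))
		fmt.Println(target, "->", rec.Body.String()) // v2, then v1
	}
}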
@@ -300,6 +298,12 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.GetIdentityProviderCfg))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.DeleteIdentityProviderCfg))

// LDAP specific service accounts ops
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/idp/ldap/add-service-account").HandlerFunc(adminMiddleware(adminAPI.AddServiceAccountLDAP))
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/ldap/list-access-keys").
HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAP)).
Queries("userDN", "{userDN:.*}", "listType", "{listType:.*}")

// LDAP IAM operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp/ldap/policy-entities").HandlerFunc(adminMiddleware(adminAPI.ListLDAPPolicyMappingEntities))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp/ldap/policy/{operation}").HandlerFunc(adminMiddleware(adminAPI.AttachDetachPolicyLDAP))
@@ -376,6 +380,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/edit").HandlerFunc(adminMiddleware(adminAPI.SRPeerEdit))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/remove").HandlerFunc(adminMiddleware(adminAPI.SRPeerRemove))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/resync/op").HandlerFunc(adminMiddleware(adminAPI.SiteReplicationResyncOp)).Queries("operation", "{operation:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/state/edit").HandlerFunc(adminMiddleware(adminAPI.SRStateEdit))

if globalIsDistErasure {
// Top locks

@@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@@ -18,23 +18,24 @@
package cmd

import (
"context"
"math"
"net/http"
"os"
"runtime"
"runtime/debug"
"sort"
"strings"
"time"

"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
xnet "github.com/minio/pkg/v3/net"
)

// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints from given list of endpoints
func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Request) madmin.ServerProperties {
func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Request, metrics bool) madmin.ServerProperties {
addr := globalLocalNodeName
if r != nil {
addr = r.Host
@@ -42,9 +43,13 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
if globalIsDistErasure {
addr = globalLocalNodeName
}
poolNumbers := make(map[int]struct{})
network := make(map[string]string)
for _, ep := range endpointServerPools {
for _, endpoint := range ep.Endpoints {
if endpoint.IsLocal {
poolNumbers[endpoint.PoolIdx+1] = struct{}{}
}
nodeName := endpoint.Host
if nodeName == "" {
nodeName = addr
@@ -59,9 +64,11 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
if err := isServerResolvable(endpoint, 5*time.Second); err == nil {
network[nodeName] = string(madmin.ItemOnline)
} else {
network[nodeName] = string(madmin.ItemOffline)
// log once the error
logger.LogOnceIf(context.Background(), err, nodeName)
if xnet.IsNetworkOrHostDown(err, false) {
network[nodeName] = string(madmin.ItemOffline)
} else if xnet.IsNetworkOrHostDown(err, true) {
network[nodeName] = "connection attempt timedout"
}
}
}
}
@@ -112,6 +119,17 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
MinioEnvVars: make(map[string]string, 10),
}

for poolNumber := range poolNumbers {
props.PoolNumbers = append(props.PoolNumbers, poolNumber)
}
sort.Ints(props.PoolNumbers)
props.PoolNumber = func() int {
if len(props.PoolNumbers) == 1 {
return props.PoolNumbers[0]
}
return math.MaxInt // this indicates that its unset.
}()
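The PoolNumbers aggregation above gathers 1-based pool indices into a map used as a set, sorts the keys into the slice field, and keeps the legacy single-valued PoolNumber meaningful only when exactly one pool is local, with math.MaxInt as the "unset" sentinel otherwise. The same logic in isolation:

package main

import (
	"fmt"
	"math"
	"sort"
)

// poolNumbers sketches the aggregation above: set keys become a sorted
// slice, and the scalar keeps its value only in the single-pool case.
func poolNumbers(set map[int]struct{}) ([]int, int) {
	nums := make([]int, 0, len(set))
	for n := range set {
		nums = append(nums, n)
	}
	sort.Ints(nums)
	if len(nums) == 1 {
		return nums, nums[0]
	}
	return nums, math.MaxInt // sentinel: node spans multiple pools
}

func main() {
	nums, single := poolNumbers(map[int]struct{}{2: {}, 1: {}})
	fmt.Println(nums, single == math.MaxInt) // [1 2] true
}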

sensitive := map[string]struct{}{
config.EnvAccessKey: {},
config.EnvSecretKey: {},
@@ -141,7 +159,7 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req

objLayer := newObjectLayerFn()
if objLayer != nil {
storageInfo := objLayer.LocalStorageInfo(GlobalContext)
storageInfo := objLayer.LocalStorageInfo(GlobalContext, metrics)
props.State = string(madmin.ItemOnline)
props.Disks = storageInfo.Disks
} else {

@@ -67,7 +67,7 @@ type ObjectToDelete struct {
ReplicateDecisionStr string `xml:"-"`
}

// createBucketConfiguration container for bucket configuration request from client.
// createBucketLocationConfiguration container for bucket configuration request from client.
// Used for parsing the location from the request body for Makebucket.
type createBucketLocationConfiguration struct {
XMLName xml.Name `xml:"CreateBucketConfiguration" json:"-"`

@@ -24,6 +24,7 @@ import (
"fmt"
"net/http"
"net/url"
"os"
"strconv"
"strings"

@@ -47,7 +48,7 @@ import (
levent "github.com/minio/minio/internal/config/lambda/event"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/hash"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)

// APIError structure
@@ -55,19 +56,23 @@ type APIError struct {
Code string
Description string
HTTPStatusCode int
ObjectSize string
RangeRequested string
}

// APIErrorResponse - error response format
type APIErrorResponse struct {
XMLName xml.Name `xml:"Error" json:"-"`
Code string
Message string
Key string `xml:"Key,omitempty" json:"Key,omitempty"`
BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"`
Resource string
Region string `xml:"Region,omitempty" json:"Region,omitempty"`
RequestID string `xml:"RequestId" json:"RequestId"`
HostID string `xml:"HostId" json:"HostId"`
XMLName xml.Name `xml:"Error" json:"-"`
Code string
Message string
Key string `xml:"Key,omitempty" json:"Key,omitempty"`
BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"`
Resource string
Region string `xml:"Region,omitempty" json:"Region,omitempty"`
RequestID string `xml:"RequestId" json:"RequestId"`
HostID string `xml:"HostId" json:"HostId"`
ActualObjectSize string `xml:"ActualObjectSize,omitempty" json:"ActualObjectSize,omitempty"`
RangeRequested string `xml:"RangeRequested,omitempty" json:"RangeRequested,omitempty"`
}
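The two new omitempty fields only appear on the wire for range errors. A trimmed-down sketch of how the struct marshals, showing the shape of the resulting XML body:

package main

import (
	"encoding/xml"
	"fmt"
)

// errorResponse is a reduced stand-in for APIErrorResponse above; the two
// trailing fields are omitted from the XML unless they are set.
type errorResponse struct {
	XMLName          xml.Name `xml:"Error"`
	Code             string
	Message          string
	ActualObjectSize string `xml:"ActualObjectSize,omitempty"`
	RangeRequested   string `xml:"RangeRequested,omitempty"`
}

func main() {
	b, err := xml.MarshalIndent(errorResponse{
		Code:             "InvalidRange",
		Message:          "The requested range is not satisfiable",
		ActualObjectSize: "1024",
		RangeRequested:   "2048-4096",
	}, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}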

// APIErrorCode type of error status.
@@ -262,6 +267,7 @@ const (
ErrInvalidResourceName
ErrInvalidLifecycleQueryParameter
ErrServerNotInitialized
ErrBucketMetadataNotInitialized
ErrRequestTimedout
ErrClientDisconnected
ErrTooManyRequests
@@ -277,9 +283,11 @@ const (
ErrMalformedJSON
ErrAdminNoSuchUser
ErrAdminNoSuchUserLDAPWarn
ErrAdminLDAPExpectedLoginName
ErrAdminNoSuchGroup
ErrAdminGroupNotEmpty
ErrAdminGroupDisabled
ErrAdminInvalidGroupName
ErrAdminNoSuchJob
ErrAdminNoSuchPolicy
ErrAdminPolicyChangeAlreadyApplied
@@ -297,9 +305,9 @@ const (
ErrAdminConfigLDAPValidation
ErrAdminConfigIDPCfgNameAlreadyExists
ErrAdminConfigIDPCfgNameDoesNotExist
ErrAdminCredentialsMismatch
ErrInsecureClientRequest
ErrObjectTampered
ErrAdminLDAPNotEnabled

// Site-Replication errors
ErrSiteReplicationInvalidRequest
@@ -390,7 +398,7 @@ const (
ErrParseExpectedIdentForGroupName
ErrParseExpectedIdentForAlias
ErrParseUnsupportedCallWithStar
ErrParseNonUnaryAgregateFunctionCall
ErrParseNonUnaryAggregateFunctionCall
ErrParseMalformedJoin
ErrParseExpectedIdentForAt
ErrParseAsteriskIsNotAloneInSelectList
@@ -418,6 +426,7 @@ const (
ErrAdminProfilerNotEnabled
ErrInvalidDecompressedSize
ErrAddUserInvalidArgument
ErrAddUserValidUTF
ErrAdminResourceInvalidArgument
ErrAdminAccountNotEligible
ErrAccountNotEligible
@@ -430,6 +439,14 @@ const (
ErrLambdaARNInvalid
ErrLambdaARNNotFound

// New Codes for GetObjectAttributes and GetObjectVersionAttributes
ErrInvalidAttributeName

ErrAdminNoAccessKey
ErrAdminNoSecretKey

ErrIAMNotInitialized

apiErrCodeEnd // This is used only for the testing code
)

@@ -443,9 +460,9 @@ func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError
if err != nil {
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
}
if globalSite.Region != "" {
if region := globalSite.Region(); region != "" {
if errCode == ErrAuthorizationHeaderMalformed {
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", region)
return apiErr
}
}
@@ -952,11 +969,11 @@ var errorCodes = errorCodeMap{
ErrReplicationRemoteConnectionError: {
Code: "XMinioAdminReplicationRemoteConnectionError",
Description: "Remote service connection error",
HTTPStatusCode: http.StatusNotFound,
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrReplicationBandwidthLimitError: {
Code: "XMinioAdminReplicationBandwidthLimitError",
Description: "Bandwidth limit for remote target must be atleast 100MBps",
Description: "Bandwidth limit for remote target must be at least 100MBps",
HTTPStatusCode: http.StatusBadRequest,
},
ErrReplicationNoExistingObjects: {
@@ -1287,7 +1304,17 @@ var errorCodes = errorCodeMap{
},
ErrServerNotInitialized: {
Code: "XMinioServerNotInitialized",
Description: "Server not initialized, please try again.",
Description: "Server not initialized yet, please try again.",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrIAMNotInitialized: {
Code: "XMinioIAMNotInitialized",
Description: "IAM sub-system not initialized yet, please try again.",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrBucketMetadataNotInitialized: {
Code: "XMinioBucketMetadataNotInitialized",
Description: "Bucket metadata not initialized yet, please try again.",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrMalformedJSON: {
@@ -1356,6 +1383,16 @@ var errorCodes = errorCodeMap{
Description: "The secret key is invalid.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminNoAccessKey: {
Code: "XMinioAdminNoAccessKey",
Description: "No access key was provided.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminNoSecretKey: {
Code: "XMinioAdminNoSecretKey",
Description: "No secret key was provided.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminConfigNoQuorum: {
Code: "XMinioAdminConfigNoQuorum",
Description: "Configuration update failed because server quorum was not met",
@@ -1422,11 +1459,6 @@ var errorCodes = errorCodeMap{
Description: "Unable to perform the requested operation because profiling is not enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminCredentialsMismatch: {
Code: "XMinioAdminCredentialsMismatch",
Description: "Credentials in config mismatch with server environment variables",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrAdminBucketQuotaExceeded: {
Code: "XMinioAdminBucketQuotaExceeded",
Description: "Bucket quota exceeded",
@@ -1455,7 +1487,7 @@ var errorCodes = errorCodeMap{
ErrTooManyRequests: {
Code: "TooManyRequests",
Description: "Deadline exceeded while waiting in incoming queue, please reduce your request rate",
HTTPStatusCode: http.StatusServiceUnavailable,
HTTPStatusCode: http.StatusTooManyRequests,
},
ErrUnsupportedMetadata: {
Code: "InvalidArgument",
@@ -1888,8 +1920,8 @@ var errorCodes = errorCodeMap{
Description: "Only COUNT with (*) as a parameter is supported in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrParseNonUnaryAgregateFunctionCall: {
Code: "ParseNonUnaryAgregateFunctionCall",
ErrParseNonUnaryAggregateFunctionCall: {
Code: "ParseNonUnaryAggregateFunctionCall",
Description: "Only one argument is supported for aggregate functions in the SQL expression.",
HTTPStatusCode: http.StatusBadRequest,
},
@@ -2010,7 +2042,7 @@ var errorCodes = errorCodeMap{
},
ErrAddUserInvalidArgument: {
Code: "XMinioInvalidIAMCredentials",
Description: "User is not allowed to be same as admin access key",
Description: "Credential is not allowed to be same as admin access key",
HTTPStatusCode: http.StatusForbidden,
},
ErrAdminResourceInvalidArgument: {
@@ -2063,7 +2095,31 @@ var errorCodes = errorCodeMap{
Description: "The specified policy is not found.",
HTTPStatusCode: http.StatusNotFound,
},
// Add your error structure here.
ErrInvalidAttributeName: {
Code: "InvalidArgument",
Description: "Invalid attribute name specified.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminLDAPNotEnabled: {
Code: "XMinioLDAPNotEnabled",
Description: "LDAP is not enabled. LDAP must be enabled to make LDAP requests.",
HTTPStatusCode: http.StatusNotImplemented,
},
ErrAdminLDAPExpectedLoginName: {
Code: "XMinioLDAPExpectedLoginName",
Description: "Expected LDAP short username but was given full DN.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminInvalidGroupName: {
Code: "XMinioInvalidGroupName",
Description: "The group name is invalid.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAddUserValidUTF: {
Code: "XMinioInvalidUTF",
Description: "Invalid UTF-8 character detected.",
HTTPStatusCode: http.StatusBadRequest,
},
}

// toAPIErrorCode - Converts embedded errors. Convenience
@@ -2074,6 +2130,11 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
return ErrNone
}

// Errors that are generated by net.Conn and any context errors must be handled here.
if errors.Is(err, os.ErrDeadlineExceeded) || errors.Is(err, context.DeadlineExceeded) {
return ErrRequestTimedout
}

// Only return ErrClientDisconnected if the provided context is actually canceled.
// This way downstream context.Canceled will still report ErrRequestTimedout
if contextCanceled(ctx) && errors.Is(ctx.Err(), context.Canceled) {
@@ -2098,6 +2159,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrAdminNoSuchGroup
case errGroupNotEmpty:
apiErr = ErrAdminGroupNotEmpty
case errGroupNameContainsReservedChars:
apiErr = ErrAdminInvalidGroupName
case errNoSuchJob:
apiErr = ErrAdminNoSuchJob
case errNoPolicyToAttachOrDetach:
@@ -2112,10 +2175,16 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrEntityTooSmall
case errAuthentication:
apiErr = ErrAccessDenied
case auth.ErrContainsReservedChars:
apiErr = ErrAdminInvalidAccessKey
case auth.ErrInvalidAccessKeyLength:
apiErr = ErrAdminInvalidAccessKey
case auth.ErrInvalidSecretKeyLength:
apiErr = ErrAdminInvalidSecretKey
case auth.ErrNoAccessKeyWithSecretKey:
apiErr = ErrAdminNoAccessKey
case auth.ErrNoSecretKeyWithAccessKey:
apiErr = ErrAdminNoSecretKey
case errInvalidStorageClass:
apiErr = ErrInvalidStorageClass
case errErasureReadQuorum:
@@ -2175,6 +2244,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrInvalidMaxParts
case ioutil.ErrOverread:
apiErr = ErrExcessData
case errServerNotInitialized:
apiErr = ErrServerNotInitialized
case errBucketMetadataNotInitialized:
apiErr = ErrBucketMetadataNotInitialized
}

// Compression errors
@@ -2343,31 +2416,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
case dns.ErrBucketConflict:
apiErr = ErrBucketAlreadyExists
default:
if _, ok := err.(tags.Error); ok {
// tag errors are not exported, so we check their custom interface to avoid logging.
// The correct type is inserted by toAPIError.
apiErr = ErrInternalError
break
}
var ie, iw int
// This work-around is to handle the issue golang/go#30648
//nolint:gocritic
if _, ferr := fmt.Fscanf(strings.NewReader(err.Error()),
"request declared a Content-Length of %d but only wrote %d bytes",
&ie, &iw); ferr != nil {
apiErr = ErrInternalError
// Make sure to log the errors which we cannot translate
// to a meaningful S3 API errors. This is added to aid in
// debugging unexpected/unhandled errors.
logger.LogIf(ctx, err)
} else if ie > iw {
if strings.Contains(err.Error(), "request declared a Content-Length") {
apiErr = ErrIncompleteBody
} else {
apiErr = ErrInternalError
// Make sure to log the errors which we cannot translate
// to a meaningful S3 API errors. This is added to aid in
// debugging unexpected/unhandled errors.
logger.LogIf(ctx, err)
}
}
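The hunk above replaces a brittle fmt.Fscanf re-parse of the net/http error text for golang/go#30648 with a plain substring check; both paths classify "request declared a Content-Length of N but only wrote M bytes" as an incomplete request body. The simplified classification in isolation:

package main

import (
	"fmt"
	"strings"
)

// isIncompleteBody sketches the simplification above: a substring match is
// enough to recognize the short-write error, with no need to extract the
// declared and written byte counts.
func isIncompleteBody(err error) bool {
	return err != nil && strings.Contains(err.Error(), "request declared a Content-Length")
}

func main() {
	err := fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", 100, 42)
	fmt.Println(isIncompleteBody(err)) // true
}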
@@ -2387,10 +2439,9 @@ func toAPIError(ctx context.Context, err error) APIError {
apiErr := errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
switch apiErr.Code {
case "NotImplemented":
desc := fmt.Sprintf("%s (%v)", apiErr.Description, err)
apiErr = APIError{
Code: apiErr.Code,
Description: desc,
Description: fmt.Sprintf("%s (%v)", apiErr.Description, err),
HTTPStatusCode: apiErr.HTTPStatusCode,
}
case "XMinioBackendDown":
@@ -2402,12 +2453,24 @@ func toAPIError(ctx context.Context, err error) APIError {
switch e := err.(type) {
case kms.Error:
apiErr = APIError{
Description: e.Err.Error(),
Code: e.APICode,
HTTPStatusCode: e.HTTPStatusCode,
Description: e.Err,
HTTPStatusCode: e.Code,
}
case batchReplicationJobError:
apiErr = APIError(e)
apiErr = APIError{
Description: e.Description,
Code: e.Code,
HTTPStatusCode: e.HTTPStatusCode,
}
case InvalidRange:
apiErr = APIError{
Code: "InvalidRange",
Description: e.Error(),
HTTPStatusCode: errorCodes[ErrInvalidRange].HTTPStatusCode,
ObjectSize: strconv.FormatInt(e.ResourceSize, 10),
RangeRequested: fmt.Sprintf("%d-%d", e.OffsetBegin, e.OffsetEnd),
}
case InvalidArgument:
apiErr = APIError{
Code: "InvalidArgument",
@@ -2511,6 +2574,13 @@ func toAPIError(ctx context.Context, err error) APIError {
}
}

if apiErr.Code == "InternalError" {
// Make sure to log the errors which we cannot translate
// to a meaningful S3 API errors. This is added to aid in
// debugging unexpected/unhandled errors.
internalLogIf(ctx, err)
}

return apiErr
}

@@ -2527,13 +2597,15 @@ func getAPIError(code APIErrorCode) APIError {
func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID, hostID string) APIErrorResponse {
reqInfo := logger.GetReqInfo(ctx)
return APIErrorResponse{
Code: err.Code,
Message: err.Description,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
Resource: resource,
Region: globalSite.Region,
RequestID: requestID,
HostID: hostID,
Code: err.Code,
Message: err.Description,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
Resource: resource,
Region: globalSite.Region(),
RequestID: requestID,
HostID: hostID,
ActualObjectSize: err.ObjectSize,
RangeRequested: err.RangeRequested,
}
}

@@ -19,6 +19,7 @@ package cmd

import (
"bytes"
"context"
"encoding/json"
"encoding/xml"
"fmt"
@@ -30,7 +31,6 @@ import (
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/crypto"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
xxml "github.com/minio/xxml"
)

@@ -50,11 +50,11 @@ func setEventStreamHeaders(w http.ResponseWriter) {
// Write http common headers
func setCommonHeaders(w http.ResponseWriter) {
// Set the "Server" http header.
w.Header().Set(xhttp.ServerInfo, "MinIO")
w.Header().Set(xhttp.ServerInfo, MinioStoreName)

// Set `x-amz-bucket-region` only if region is set on the server
// by default minio uses an empty region.
if region := globalSite.Region; region != "" {
if region := globalSite.Region(); region != "" {
w.Header().Set(xhttp.AmzBucketRegion, region)
}
w.Header().Set(xhttp.AcceptRanges, "bytes")
@@ -68,7 +68,7 @@ func encodeResponse(response interface{}) []byte {
var buf bytes.Buffer
buf.WriteString(xml.Header)
if err := xml.NewEncoder(&buf).Encode(response); err != nil {
logger.LogIf(GlobalContext, err)
bugLogIf(GlobalContext, err)
return nil
}
return buf.Bytes()
@@ -86,7 +86,7 @@ func encodeResponseList(response interface{}) []byte {
var buf bytes.Buffer
buf.WriteString(xxml.Header)
if err := xxml.NewEncoder(&buf).Encode(response); err != nil {
logger.LogIf(GlobalContext, err)
bugLogIf(GlobalContext, err)
return nil
}
return buf.Bytes()
@@ -108,7 +108,7 @@ func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
}

// Write object header
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
// set common headers
setCommonHeaders(w)

@@ -136,7 +136,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
// Set tag count if object has tags
if len(objInfo.UserTags) > 0 {
tags, _ := tags.ParseObjectTags(objInfo.UserTags)
if tags.Count() > 0 {
if tags != nil && tags.Count() > 0 {
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(tags.Count())}
if opts.Tagging {
// This is MinIO only extension to return back tags along with the count.
|
||||
@@ -213,7 +213,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
|
||||
if objInfo.IsRemote() {
|
||||
// Check if object is being restored. For more information on x-amz-restore header see
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_ResponseSyntax
|
||||
w.Header()[xhttp.AmzStorageClass] = []string{objInfo.TransitionedObject.Tier}
|
||||
w.Header()[xhttp.AmzStorageClass] = []string{filterStorageClass(ctx, objInfo.TransitionedObject.Tier)}
|
||||
}
|
||||
|
||||
if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {
|
||||
|
||||
@@ -35,7 +35,7 @@ import (
|
||||
"github.com/minio/minio/internal/hash"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
xxml "github.com/minio/xxml"
|
||||
)
|
||||
|
||||
@@ -502,8 +502,49 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
|
||||
return data
|
||||
}
|
||||
|
||||
func cleanReservedKeys(metadata map[string]string) map[string]string {
|
||||
m := cloneMSS(metadata)
|
||||
|
||||
switch kind, _ := crypto.IsEncrypted(metadata); kind {
|
||||
case crypto.S3:
|
||||
m[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionAES
|
||||
case crypto.S3KMS:
|
||||
m[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionKMS
|
||||
m[xhttp.AmzServerSideEncryptionKmsID] = kmsKeyIDFromMetadata(metadata)
|
||||
if kmsCtx, ok := metadata[crypto.MetaContext]; ok {
|
||||
m[xhttp.AmzServerSideEncryptionKmsContext] = kmsCtx
|
||||
}
|
||||
case crypto.SSEC:
|
||||
m[xhttp.AmzServerSideEncryptionCustomerAlgorithm] = xhttp.AmzEncryptionAES
|
||||
|
||||
}
|
||||
|
||||
var toRemove []string
|
||||
for k := range cleanMinioInternalMetadataKeys(m) {
|
||||
if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
|
||||
// Do not need to send any internal metadata
|
||||
// values to client.
|
||||
toRemove = append(toRemove, k)
|
||||
continue
|
||||
}
|
||||
|
||||
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
|
||||
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
|
||||
toRemove = append(toRemove, k)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
for _, k := range toRemove {
|
||||
delete(m, k)
|
||||
delete(m, strings.ToLower(k))
|
||||
}
|
||||
|
||||
return m
|
||||
}
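
The new cleanReservedKeys helper centralizes the metadata scrubbing that was previously duplicated inline in the list-response generators (those inline loops are replaced later in this diff). A simplified, self-contained sketch of the core filtering idea; the prefix constant is an assumption standing in for ReservedMetadataPrefixLower, and the real helper additionally normalizes SSE headers and strips the keys flagged in GHSA-76wf-9vgp-pj7w:

package main

import (
	"fmt"
	"strings"
)

// cleanReserved drops internal bookkeeping keys before user metadata is
// returned to clients. Hypothetical reduction of cleanReservedKeys above.
func cleanReserved(metadata map[string]string) map[string]string {
	const reservedPrefix = "x-minio-internal-" // assumption: value of ReservedMetadataPrefixLower
	out := make(map[string]string, len(metadata))
	for k, v := range metadata {
		if strings.HasPrefix(strings.ToLower(k), reservedPrefix) {
			continue // never leak internal metadata to clients
		}
		out[k] = v
	}
	return out
}

func main() {
	m := map[string]string{
		"X-Minio-Internal-replication-status": "COMPLETED",
		"X-Amz-Meta-Project":                  "demo",
	}
	fmt.Println(cleanReserved(m)) // map[X-Amz-Meta-Project:demo]
}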

// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse {
func generateListVersionsResponse(ctx context.Context, bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse {
versions := make([]ObjectVersion, 0, len(resp.Objects))

owner := &Owner{
@@ -532,7 +573,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
@@ -549,18 +590,11 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
case crypto.SSEC:
content.UserMetadata.Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, xhttp.AmzEncryptionAES)
}
for k, v := range cleanMinioInternalMetadataKeys(object.UserDefined) {
if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
// Do not need to send any internal metadata
// values to client.
continue
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
for k, v := range cleanReservedKeys(object.UserDefined) {
content.UserMetadata.Set(k, v)
}

content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat))
content.Internal = &ObjectInternalInfo{
K: object.DataBlocks,
M: object.ParityBlocks,
@@ -600,7 +634,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
}

// generates an ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
func generateListObjectsV1Response(ctx context.Context, bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
contents := make([]Object, 0, len(resp.Objects))
owner := &Owner{
ID: globalMinioDefaultOwnerID,
@@ -620,7 +654,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
@@ -649,7 +683,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
}

// generates an ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata metaCheckFn) ListObjectsV2Response {
func generateListObjectsV2Response(ctx context.Context, bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata metaCheckFn) ListObjectsV2Response {
contents := make([]Object, 0, len(objects))
var owner *Owner
if fetchOwner {
@@ -673,7 +707,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
@@ -692,18 +726,10 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
case crypto.SSEC:
content.UserMetadata.Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, xhttp.AmzEncryptionAES)
}
for k, v := range cleanMinioInternalMetadataKeys(object.UserDefined) {
if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
// Do not need to send any internal metadata
// values to client.
continue
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
for k, v := range cleanReservedKeys(object.UserDefined) {
content.UserMetadata.Set(k, v)
}
content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat))
content.Internal = &ObjectInternalInfo{
K: object.DataBlocks,
M: object.ParityBlocks,
@@ -763,8 +789,8 @@ func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) Initi
}

// generates CompleteMultipartUploadResponse for given bucket, key, location and ETag.
func generateCompleteMultpartUploadResponse(bucket, key, location string, oi ObjectInfo) CompleteMultipartUploadResponse {
cs := oi.decryptChecksums(0)
func generateCompleteMultipartUploadResponse(bucket, key, location string, oi ObjectInfo, h http.Header) CompleteMultipartUploadResponse {
cs := oi.decryptChecksums(0, h)
c := CompleteMultipartUploadResponse{
Location: location,
Bucket: bucket,
@@ -865,7 +891,7 @@ func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType
}
// Similar check to http.checkWriteHeaderCode
if statusCode < 100 || statusCode > 999 {
logger.Error(fmt.Sprintf("invalid WriteHeader code %v", statusCode))
bugLogIf(context.Background(), fmt.Errorf("invalid WriteHeader code %v", statusCode))
statusCode = http.StatusInternalServerError
}
setCommonHeaders(w)
@@ -918,22 +944,24 @@ func writeSuccessResponseHeadersOnly(w http.ResponseWriter) {
writeResponse(w, http.StatusOK, nil, mimeNone)
}

// writeErrorRespone writes error headers
// writeErrorResponse writes error headers
func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
switch err.Code {
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
if err.HTTPStatusCode == http.StatusServiceUnavailable {
// Set retry-after header to indicate user-agents to retry request after 120secs.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
}

switch err.Code {
case "InvalidRegion":
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region)
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region())
case "AuthorizationHeaderMalformed":
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region())
}

// Similar check to http.checkWriteHeaderCode
if err.HTTPStatusCode < 100 || err.HTTPStatusCode > 999 {
logger.Error(fmt.Sprintf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code))
bugLogIf(ctx, fmt.Errorf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code))
err.HTTPStatusCode = http.StatusInternalServerError
}
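
The refactored writeErrorResponse keys Retry-After off the HTTP status (any 503) instead of a hard-coded list of error codes, so every service-unavailable response now advertises a retry delay. For illustration, a small client sketch, not MinIO code (the endpoint and retry policy are made up), that honors a numeric Retry-After header the way this server-side change intends:

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// fetchWithRetry sleeps for the server-advertised number of seconds before
// retrying a 503, falling back to one second when the header is absent.
func fetchWithRetry(url string, attempts int) (*http.Response, error) {
	for i := 0; i < attempts; i++ {
		resp, err := http.Get(url)
		if err != nil {
			return nil, err
		}
		if resp.StatusCode != http.StatusServiceUnavailable {
			return resp, nil
		}
		resp.Body.Close()
		delay := 1 * time.Second
		if secs, err := strconv.Atoi(resp.Header.Get("Retry-After")); err == nil {
			delay = time.Duration(secs) * time.Second
		}
		fmt.Printf("503 received, retrying in %s\n", delay)
		time.Sleep(delay)
	}
	return nil, fmt.Errorf("gave up after %d attempts", attempts)
}

func main() {
	// Example endpoint only; any server that returns 503 with Retry-After works.
	_, _ = fetchWithRetry("http://localhost:9000/", 3)
}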

@@ -18,16 +18,13 @@
package cmd

import (
"compress/gzip"
"net"
"net/http"

"github.com/klauspost/compress/gzhttp"
"github.com/minio/console/restapi"
consoleapi "github.com/minio/console/api"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/wildcard"
"github.com/minio/pkg/v3/wildcard"
"github.com/rs/cors"
)

@@ -43,13 +40,13 @@ func setHTTPServer(h *xhttp.Server) {
globalObjLayerMutex.Unlock()
}

func newConsoleServerFn() *restapi.Server {
func newConsoleServerFn() *consoleapi.Server {
globalObjLayerMutex.RLock()
defer globalObjLayerMutex.RUnlock()
return globalConsoleSrv
}

func setConsoleSrv(srv *restapi.Server) {
func setConsoleSrv(srv *consoleapi.Server) {
globalObjLayerMutex.Lock()
globalConsoleSrv = srv
globalObjLayerMutex.Unlock()
@@ -67,7 +64,7 @@ func setObjectLayer(o ObjectLayer) {
globalObjLayerMutex.Unlock()
}

// objectAPIHandler implements and provides http handlers for S3 API.
// objectAPIHandlers implements and provides http handlers for S3 API.
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
}
@@ -171,6 +168,87 @@ var rejectedBucketAPIs = []rejectedAPI{
},
}

// Set of s3 handler options as bit flags.
type s3HFlag uint8

const (
// when provided, disables Gzip compression.
noGZS3HFlag = 1 << iota

// when provided, enables only tracing of headers. Otherwise, both headers
// and body are traced.
traceHdrsS3HFlag

// when provided, disables throttling via the `maxClients` middleware.
noThrottleS3HFlag
)

func (h s3HFlag) has(flag s3HFlag) bool {
// Use bitwise-AND and check if the result is non-zero.
return h&flag != 0
}
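
Because the constants are declared with 1 << iota, each flag owns a distinct bit: flags compose with bitwise-OR, and has tests membership with bitwise-AND. A self-contained sketch of the same scheme, separate from the MinIO code above:

package main

import "fmt"

type s3HFlag uint8

const (
	noGZS3HFlag       s3HFlag = 1 << iota // 0b001
	traceHdrsS3HFlag                      // 0b010
	noThrottleS3HFlag                     // 0b100
)

// has reports whether flag's bit is set in h.
func (h s3HFlag) has(flag s3HFlag) bool { return h&flag != 0 }

func main() {
	// Combine two flags the way s3APIMiddleware's variadic callers do.
	flags := traceHdrsS3HFlag | noThrottleS3HFlag
	fmt.Println(flags.has(traceHdrsS3HFlag)) // true
	fmt.Println(flags.has(noGZS3HFlag))      // false
}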

// s3APIMiddleware - performs some common handler functionality for S3 API
// handlers.
//
// It is set per-"handler function registration" in the router to allow for
// behavior modification via flags.
//
// This middleware always calls `collectAPIStats` to collect API stats.
//
// The passed in handler function must be a method of `objectAPIHandlers` for
// the name displayed in logs and trace to be accurate. The name is extracted
// via reflection.
//
// When **no** flags are passed, the behavior is to trace both headers and body,
// gzip the response and throttle the handler via `maxClients`. Each of these
// can be disabled via the corresponding `s3HFlag`.
//
// CAUTION: for requests involving large req/resp bodies ensure to pass the
// `traceHdrsS3HFlag`, otherwise both headers and body will be traced, causing
// high memory usage!
func s3APIMiddleware(f http.HandlerFunc, flags ...s3HFlag) http.HandlerFunc {
// Collect all flags with bitwise-OR and assign operator
var handlerFlags s3HFlag
for _, flag := range flags {
handlerFlags |= flag
}

// Get name of the handler using reflection.
handlerName := getHandlerName(f, "objectAPIHandlers")

var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
// Wrap the actual handler with the appropriate tracing middleware.
var tracedHandler http.HandlerFunc
if handlerFlags.has(traceHdrsS3HFlag) {
tracedHandler = httpTraceHdrs(f)
} else {
tracedHandler = httpTraceAll(f)
}

// Skip wrapping with the gzip middleware if specified.
var gzippedHandler http.HandlerFunc = tracedHandler
if !handlerFlags.has(noGZS3HFlag) {
gzippedHandler = gzipHandler(gzippedHandler)
}

// Skip wrapping with throttling middleware if specified.
var throttledHandler http.HandlerFunc = gzippedHandler
if !handlerFlags.has(noThrottleS3HFlag) {
throttledHandler = maxClients(throttledHandler)
}

// Collect API stats using the API name got from reflection in
// `getHandlerName`.
statsCollectedHandler := collectAPIStats(handlerName, throttledHandler)

// Call the final handler.
statsCollectedHandler(w, r)
}

return handler
}
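
The middleware composes its wrappers inside-out: tracing sits closest to the handler, then gzip, then throttling, with stats collection outermost. A tiny standalone sketch of that composition order, with stand-in middlewares rather than the real httpTraceAll/gzipHandler/maxClients/collectAPIStats:

package main

import (
	"fmt"
	"net/http"
)

// logWrap is a placeholder middleware that announces itself, then delegates.
func logWrap(name string, next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("enter", name)
		next(w, r)
	}
}

func main() {
	h := func(w http.ResponseWriter, r *http.Request) { fmt.Println("handler") }
	// Composed inside-out, so a request passes stats -> throttle -> gzip -> trace -> handler.
	chain := logWrap("stats", logWrap("throttle", logWrap("gzip", logWrap("trace", h))))
	chain(nil, nil) // the stand-ins ignore the writer and request
}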

// registerAPIRouter - registers S3 compatible APIs.
func registerAPIRouter(router *mux.Router) {
// Initialize API.
@@ -208,12 +286,6 @@ func registerAPIRouter(router *mux.Router) {
}
routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())

gz, err := gzhttp.NewWrapper(gzhttp.MinSize(1000), gzhttp.CompressionLevel(gzip.BestSpeed))
if err != nil {
// Static params, so this is very unlikely.
logger.Fatal(err, "Unable to initialize server")
}
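
The block deleted here built a single shared gzip wrapper for the whole router; after the refactor, each handler presumably gets compression from gzipHandler inside s3APIMiddleware instead. For reference, a self-contained sketch using the same klauspost gzhttp calls the removed lines used (the route and handler below are illustrative, not MinIO's):

package main

import (
	"compress/gzip"
	"fmt"
	"net/http"

	"github.com/klauspost/compress/gzhttp"
)

func main() {
	// NewWrapper returns a func(http.Handler) http.HandlerFunc that gzips
	// responses larger than MinSize when the client accepts gzip.
	gz, err := gzhttp.NewWrapper(gzhttp.MinSize(1000), gzhttp.CompressionLevel(gzip.BestSpeed))
	if err != nil {
		panic(err) // static options, so this should never happen
	}
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})
	http.Handle("/", gz(hello))
}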

for _, router := range routers {
// Register all rejected object APIs
for _, r := range rejectedObjAPIs {
@@ -225,237 +297,307 @@ func registerAPIRouter(router *mux.Router) {

// Object operations
// HeadObject
router.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
collectAPIStats("headobject", maxClients(gz(httpTraceAll(api.HeadObjectHandler)))))
router.Methods(http.MethodHead).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.HeadObjectHandler))

// GetObjectAttributes
router.Methods(http.MethodGet).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.GetObjectAttributesHandler, traceHdrsS3HFlag)).
Queries("attributes", "")

// CopyObjectPart
router.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
HandlerFunc(collectAPIStats("copyobjectpart", maxClients(gz(httpTraceAll(api.CopyObjectPartHandler))))).
HandlerFunc(s3APIMiddleware(api.CopyObjectPartHandler)).
Queries("partNumber", "{partNumber:.*}", "uploadId", "{uploadId:.*}")
// PutObjectPart
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjectpart", maxClients(gz(httpTraceHdrs(api.PutObjectPartHandler))))).Queries("partNumber", "{partNumber:.*}", "uploadId", "{uploadId:.*}")
router.Methods(http.MethodPut).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PutObjectPartHandler, traceHdrsS3HFlag)).
Queries("partNumber", "{partNumber:.*}", "uploadId", "{uploadId:.*}")
// ListObjectParts
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("listobjectparts", maxClients(gz(httpTraceAll(api.ListObjectPartsHandler))))).Queries("uploadId", "{uploadId:.*}")
router.Methods(http.MethodGet).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.ListObjectPartsHandler)).
Queries("uploadId", "{uploadId:.*}")
// CompleteMultipartUpload
router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("completemultipartupload", maxClients(gz(httpTraceAll(api.CompleteMultipartUploadHandler))))).Queries("uploadId", "{uploadId:.*}")
router.Methods(http.MethodPost).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.CompleteMultipartUploadHandler)).
Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload
router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("newmultipartupload", maxClients(gz(httpTraceAll(api.NewMultipartUploadHandler))))).Queries("uploads", "")
router.Methods(http.MethodPost).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.NewMultipartUploadHandler)).
Queries("uploads", "")
// AbortMultipartUpload
router.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
collectAPIStats("abortmultipartupload", maxClients(gz(httpTraceAll(api.AbortMultipartUploadHandler))))).Queries("uploadId", "{uploadId:.*}")
router.Methods(http.MethodDelete).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.AbortMultipartUploadHandler)).
Queries("uploadId", "{uploadId:.*}")
// GetObjectACL - this is a dummy call.
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobjectacl", maxClients(gz(httpTraceHdrs(api.GetObjectACLHandler))))).Queries("acl", "")
router.Methods(http.MethodGet).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.GetObjectACLHandler, traceHdrsS3HFlag)).
Queries("acl", "")
// PutObjectACL - this is a dummy call.
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjectacl", maxClients(gz(httpTraceHdrs(api.PutObjectACLHandler))))).Queries("acl", "")
router.Methods(http.MethodPut).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PutObjectACLHandler, traceHdrsS3HFlag)).
Queries("acl", "")
// GetObjectTagging
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobjecttagging", maxClients(gz(httpTraceHdrs(api.GetObjectTaggingHandler))))).Queries("tagging", "")
router.Methods(http.MethodGet).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.GetObjectTaggingHandler, traceHdrsS3HFlag)).
Queries("tagging", "")
// PutObjectTagging
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjecttagging", maxClients(gz(httpTraceHdrs(api.PutObjectTaggingHandler))))).Queries("tagging", "")
router.Methods(http.MethodPut).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PutObjectTaggingHandler, traceHdrsS3HFlag)).
Queries("tagging", "")
// DeleteObjectTagging
router.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
collectAPIStats("deleteobjecttagging", maxClients(gz(httpTraceHdrs(api.DeleteObjectTaggingHandler))))).Queries("tagging", "")
router.Methods(http.MethodDelete).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.DeleteObjectTaggingHandler, traceHdrsS3HFlag)).
Queries("tagging", "")
// SelectObjectContent
router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("selectobjectcontent", maxClients(gz(httpTraceHdrs(api.SelectObjectContentHandler))))).Queries("select", "").Queries("select-type", "2")
router.Methods(http.MethodPost).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.SelectObjectContentHandler, traceHdrsS3HFlag)).
Queries("select", "").Queries("select-type", "2")
// GetObjectRetention
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobjectretention", maxClients(gz(httpTraceAll(api.GetObjectRetentionHandler))))).Queries("retention", "")
router.Methods(http.MethodGet).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.GetObjectRetentionHandler)).
Queries("retention", "")
// GetObjectLegalHold
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobjectlegalhold", maxClients(gz(httpTraceAll(api.GetObjectLegalHoldHandler))))).Queries("legal-hold", "")
router.Methods(http.MethodGet).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.GetObjectLegalHoldHandler)).
Queries("legal-hold", "")
// GetObject with lambda ARNs
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobject", maxClients(gz(httpTraceHdrs(api.GetObjectLambdaHandler))))).Queries("lambdaArn", "{lambdaArn:.*}")
router.Methods(http.MethodGet).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.GetObjectLambdaHandler, traceHdrsS3HFlag)).
Queries("lambdaArn", "{lambdaArn:.*}")
// GetObject
router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
collectAPIStats("getobject", maxClients(gz(httpTraceHdrs(api.GetObjectHandler)))))
router.Methods(http.MethodGet).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.GetObjectHandler, traceHdrsS3HFlag))
// CopyObject
router.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(
collectAPIStats("copyobject", maxClients(gz(httpTraceAll(api.CopyObjectHandler)))))
router.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
HandlerFunc(s3APIMiddleware(api.CopyObjectHandler))
// PutObjectRetention
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjectretention", maxClients(gz(httpTraceAll(api.PutObjectRetentionHandler))))).Queries("retention", "")
router.Methods(http.MethodPut).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PutObjectRetentionHandler)).
Queries("retention", "")
// PutObjectLegalHold
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobjectlegalhold", maxClients(gz(httpTraceAll(api.PutObjectLegalHoldHandler))))).Queries("legal-hold", "")
router.Methods(http.MethodPut).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PutObjectLegalHoldHandler)).
Queries("legal-hold", "")

// PutObject with auto-extract support for zip
router.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzSnowballExtract, "true").HandlerFunc(
collectAPIStats("putobject", maxClients(gz(httpTraceHdrs(api.PutObjectExtractHandler)))))
router.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzSnowballExtract, "true").
HandlerFunc(s3APIMiddleware(api.PutObjectExtractHandler, traceHdrsS3HFlag))

// PutObject
router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
collectAPIStats("putobject", maxClients(gz(httpTraceHdrs(api.PutObjectHandler)))))
router.Methods(http.MethodPut).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PutObjectHandler, traceHdrsS3HFlag))

// DeleteObject
router.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
collectAPIStats("deleteobject", maxClients(gz(httpTraceAll(api.DeleteObjectHandler)))))
router.Methods(http.MethodDelete).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.DeleteObjectHandler))

// PostRestoreObject
router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("restoreobject", maxClients(gz(httpTraceAll(api.PostRestoreObjectHandler))))).Queries("restore", "")
router.Methods(http.MethodPost).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PostRestoreObjectHandler)).
Queries("restore", "")

// Bucket operations

// GetBucketLocation
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketlocation", maxClients(gz(httpTraceAll(api.GetBucketLocationHandler))))).Queries("location", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketLocationHandler)).
Queries("location", "")
// GetBucketPolicy
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketpolicy", maxClients(gz(httpTraceAll(api.GetBucketPolicyHandler))))).Queries("policy", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketPolicyHandler)).
Queries("policy", "")
// GetBucketLifecycle
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketlifecycle", maxClients(gz(httpTraceAll(api.GetBucketLifecycleHandler))))).Queries("lifecycle", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketLifecycleHandler)).
Queries("lifecycle", "")
// GetBucketEncryption
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketencryption", maxClients(gz(httpTraceAll(api.GetBucketEncryptionHandler))))).Queries("encryption", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketEncryptionHandler)).
Queries("encryption", "")
// GetBucketObjectLockConfig
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketobjectlockconfiguration", maxClients(gz(httpTraceAll(api.GetBucketObjectLockConfigHandler))))).Queries("object-lock", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketObjectLockConfigHandler)).
Queries("object-lock", "")
// GetBucketReplicationConfig
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketreplicationconfiguration", maxClients(gz(httpTraceAll(api.GetBucketReplicationConfigHandler))))).Queries("replication", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketReplicationConfigHandler)).
Queries("replication", "")
// GetBucketVersioning
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketversioning", maxClients(gz(httpTraceAll(api.GetBucketVersioningHandler))))).Queries("versioning", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketVersioningHandler)).
Queries("versioning", "")
// GetBucketNotification
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketnotification", maxClients(gz(httpTraceAll(api.GetBucketNotificationHandler))))).Queries("notification", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketNotificationHandler)).
Queries("notification", "")
// ListenNotification
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listennotification", gz(httpTraceAll(api.ListenNotificationHandler)))).Queries("events", "{events:.*}")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag, traceHdrsS3HFlag)).
Queries("events", "{events:.*}")
// ResetBucketReplicationStatus - MinIO extension API
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("resetbucketreplicationstatus", maxClients(gz(httpTraceAll(api.ResetBucketReplicationStatusHandler))))).Queries("replication-reset-status", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ResetBucketReplicationStatusHandler)).
Queries("replication-reset-status", "")

// Dummy Bucket Calls
// GetBucketACL -- this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketacl", maxClients(gz(httpTraceAll(api.GetBucketACLHandler))))).Queries("acl", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketACLHandler)).
Queries("acl", "")
// PutBucketACL -- this is a dummy call.
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketacl", maxClients(gz(httpTraceAll(api.PutBucketACLHandler))))).Queries("acl", "")
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketACLHandler)).
Queries("acl", "")
// GetBucketCors - this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketcors", maxClients(gz(httpTraceAll(api.GetBucketCorsHandler))))).Queries("cors", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketCorsHandler)).
Queries("cors", "")
// GetBucketWebsiteHandler - this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketwebsite", maxClients(gz(httpTraceAll(api.GetBucketWebsiteHandler))))).Queries("website", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketWebsiteHandler)).
Queries("website", "")
// GetBucketAccelerateHandler - this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketaccelerate", maxClients(gz(httpTraceAll(api.GetBucketAccelerateHandler))))).Queries("accelerate", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketAccelerateHandler)).
Queries("accelerate", "")
// GetBucketRequestPaymentHandler - this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketrequestpayment", maxClients(gz(httpTraceAll(api.GetBucketRequestPaymentHandler))))).Queries("requestPayment", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketRequestPaymentHandler)).
Queries("requestPayment", "")
// GetBucketLoggingHandler - this is a dummy call.
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketlogging", maxClients(gz(httpTraceAll(api.GetBucketLoggingHandler))))).Queries("logging", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketLoggingHandler)).
Queries("logging", "")
// GetBucketTaggingHandler
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbuckettagging", maxClients(gz(httpTraceAll(api.GetBucketTaggingHandler))))).Queries("tagging", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketTaggingHandler)).
Queries("tagging", "")
// DeleteBucketWebsiteHandler
router.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketwebsite", maxClients(gz(httpTraceAll(api.DeleteBucketWebsiteHandler))))).Queries("website", "")
router.Methods(http.MethodDelete).
HandlerFunc(s3APIMiddleware(api.DeleteBucketWebsiteHandler)).
Queries("website", "")
// DeleteBucketTaggingHandler
router.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebuckettagging", maxClients(gz(httpTraceAll(api.DeleteBucketTaggingHandler))))).Queries("tagging", "")
router.Methods(http.MethodDelete).
HandlerFunc(s3APIMiddleware(api.DeleteBucketTaggingHandler)).
Queries("tagging", "")

// ListMultipartUploads
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listmultipartuploads", maxClients(gz(httpTraceAll(api.ListMultipartUploadsHandler))))).Queries("uploads", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ListMultipartUploadsHandler)).
Queries("uploads", "")
// ListObjectsV2M
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectsv2M", maxClients(gz(httpTraceAll(api.ListObjectsV2MHandler))))).Queries("list-type", "2", "metadata", "true")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ListObjectsV2MHandler)).
Queries("list-type", "2", "metadata", "true")
// ListObjectsV2
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectsv2", maxClients(gz(httpTraceAll(api.ListObjectsV2Handler))))).Queries("list-type", "2")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ListObjectsV2Handler)).
Queries("list-type", "2")
// ListObjectVersions
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectversions", maxClients(gz(httpTraceAll(api.ListObjectVersionsMHandler))))).Queries("versions", "", "metadata", "true")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ListObjectVersionsMHandler)).
Queries("versions", "", "metadata", "true")
// ListObjectVersions
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectversions", maxClients(gz(httpTraceAll(api.ListObjectVersionsHandler))))).Queries("versions", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ListObjectVersionsHandler)).
Queries("versions", "")
// GetBucketPolicyStatus
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getpolicystatus", maxClients(gz(httpTraceAll(api.GetBucketPolicyStatusHandler))))).Queries("policyStatus", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketPolicyStatusHandler)).
Queries("policyStatus", "")
// PutBucketLifecycle
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketlifecycle", maxClients(gz(httpTraceAll(api.PutBucketLifecycleHandler))))).Queries("lifecycle", "")
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketLifecycleHandler)).
Queries("lifecycle", "")
// PutBucketReplicationConfig
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketreplicationconfiguration", maxClients(gz(httpTraceAll(api.PutBucketReplicationConfigHandler))))).Queries("replication", "")
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketReplicationConfigHandler)).
Queries("replication", "")
// PutBucketEncryption
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketencryption", maxClients(gz(httpTraceAll(api.PutBucketEncryptionHandler))))).Queries("encryption", "")
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketEncryptionHandler)).
Queries("encryption", "")

// PutBucketPolicy
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketpolicy", maxClients(gz(httpTraceAll(api.PutBucketPolicyHandler))))).Queries("policy", "")
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketPolicyHandler)).
Queries("policy", "")

// PutBucketObjectLockConfig
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketobjectlockconfig", maxClients(gz(httpTraceAll(api.PutBucketObjectLockConfigHandler))))).Queries("object-lock", "")
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketObjectLockConfigHandler)).
Queries("object-lock", "")
// PutBucketTaggingHandler
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbuckettagging", maxClients(gz(httpTraceAll(api.PutBucketTaggingHandler))))).Queries("tagging", "")
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketTaggingHandler)).
Queries("tagging", "")
// PutBucketVersioning
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketversioning", maxClients(gz(httpTraceAll(api.PutBucketVersioningHandler))))).Queries("versioning", "")
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketVersioningHandler)).
Queries("versioning", "")
// PutBucketNotification
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketnotification", maxClients(gz(httpTraceAll(api.PutBucketNotificationHandler))))).Queries("notification", "")
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketNotificationHandler)).
Queries("notification", "")
// ResetBucketReplicationStart - MinIO extension API
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("resetbucketreplicationstart", maxClients(gz(httpTraceAll(api.ResetBucketReplicationStartHandler))))).Queries("replication-reset", "")
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.ResetBucketReplicationStartHandler)).
Queries("replication-reset", "")

// PutBucket
router.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucket", maxClients(gz(httpTraceAll(api.PutBucketHandler)))))
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketHandler))
// HeadBucket
router.Methods(http.MethodHead).HandlerFunc(
collectAPIStats("headbucket", maxClients(gz(httpTraceAll(api.HeadBucketHandler)))))
router.Methods(http.MethodHead).
HandlerFunc(s3APIMiddleware(api.HeadBucketHandler))
// PostPolicy
router.Methods(http.MethodPost).MatcherFunc(func(r *http.Request, _ *mux.RouteMatch) bool {
return isRequestPostPolicySignatureV4(r)
}).HandlerFunc(collectAPIStats("postpolicybucket", maxClients(gz(httpTraceHdrs(api.PostPolicyBucketHandler)))))
router.Methods(http.MethodPost).
MatcherFunc(func(r *http.Request, _ *mux.RouteMatch) bool {
return isRequestPostPolicySignatureV4(r)
}).
HandlerFunc(s3APIMiddleware(api.PostPolicyBucketHandler, traceHdrsS3HFlag))
// DeleteMultipleObjects
router.Methods(http.MethodPost).HandlerFunc(
collectAPIStats("deletemultipleobjects", maxClients(gz(httpTraceAll(api.DeleteMultipleObjectsHandler))))).Queries("delete", "")
router.Methods(http.MethodPost).
HandlerFunc(s3APIMiddleware(api.DeleteMultipleObjectsHandler)).
Queries("delete", "")
// DeleteBucketPolicy
router.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketpolicy", maxClients(gz(httpTraceAll(api.DeleteBucketPolicyHandler))))).Queries("policy", "")
router.Methods(http.MethodDelete).
HandlerFunc(s3APIMiddleware(api.DeleteBucketPolicyHandler)).
Queries("policy", "")
// DeleteBucketReplication
router.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketreplicationconfiguration", maxClients(gz(httpTraceAll(api.DeleteBucketReplicationConfigHandler))))).Queries("replication", "")
router.Methods(http.MethodDelete).
HandlerFunc(s3APIMiddleware(api.DeleteBucketReplicationConfigHandler)).
Queries("replication", "")
// DeleteBucketLifecycle
router.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketlifecycle", maxClients(gz(httpTraceAll(api.DeleteBucketLifecycleHandler))))).Queries("lifecycle", "")
router.Methods(http.MethodDelete).
HandlerFunc(s3APIMiddleware(api.DeleteBucketLifecycleHandler)).
Queries("lifecycle", "")
// DeleteBucketEncryption
router.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucketencryption", maxClients(gz(httpTraceAll(api.DeleteBucketEncryptionHandler))))).Queries("encryption", "")
router.Methods(http.MethodDelete).
HandlerFunc(s3APIMiddleware(api.DeleteBucketEncryptionHandler)).
Queries("encryption", "")
// DeleteBucket
router.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucket", maxClients(gz(httpTraceAll(api.DeleteBucketHandler)))))
router.Methods(http.MethodDelete).
HandlerFunc(s3APIMiddleware(api.DeleteBucketHandler))

// MinIO extension API for replication.
//
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketreplicationmetrics", maxClients(gz(httpTraceAll(api.GetBucketReplicationMetricsV2Handler))))).Queries("replication-metrics", "2")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketReplicationMetricsV2Handler)).
Queries("replication-metrics", "2")
// deprecated handler
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("getbucketreplicationmetrics", maxClients(gz(httpTraceAll(api.GetBucketReplicationMetricsHandler))))).Queries("replication-metrics", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketReplicationMetricsHandler)).
Queries("replication-metrics", "")

// ValidateBucketReplicationCreds
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("checkbucketreplicationconfiguration", maxClients(gz(httpTraceAll(api.ValidateBucketReplicationCredsHandler))))).Queries("replication-check", "")
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ValidateBucketReplicationCredsHandler)).
Queries("replication-check", "")

// Register rejected bucket APIs
for _, r := range rejectedBucketAPIs {
@@ -465,24 +607,25 @@ func registerAPIRouter(router *mux.Router) {
}

// S3 ListObjectsV1 (Legacy)
router.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectsv1", maxClients(gz(httpTraceAll(api.ListObjectsV1Handler)))))
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.ListObjectsV1Handler))
}

// Root operation

// ListenNotification
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
collectAPIStats("listennotification", gz(httpTraceAll(api.ListenNotificationHandler)))).Queries("events", "{events:.*}")
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).
HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag, traceHdrsS3HFlag)).
Queries("events", "{events:.*}")

// ListBuckets
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
collectAPIStats("listbuckets", maxClients(gz(httpTraceAll(api.ListBucketsHandler)))))
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).
HandlerFunc(s3APIMiddleware(api.ListBucketsHandler))

// S3 browser with signature v4 adds '//' for ListBuckets request, so rather
// than failing with UnknownAPIRequest we simply handle it for now.
apiRouter.Methods(http.MethodGet).Path(SlashSeparator + SlashSeparator).HandlerFunc(
collectAPIStats("listbuckets", maxClients(gz(httpTraceAll(api.ListBucketsHandler)))))
apiRouter.Methods(http.MethodGet).Path(SlashSeparator + SlashSeparator).
HandlerFunc(s3APIMiddleware(api.ListBucketsHandler))

// If none of the routes match add default error handler routes
apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))

@@ -18,6 +18,10 @@
package cmd

import (
"fmt"
"net/http"
"reflect"
"runtime"
"strings"
)

@@ -100,3 +104,16 @@ func s3EncodeName(name, encodingType string) string {
}
return name
}

// getHandlerName returns the name of the handler function. It takes the type
// name as a string to clean up the name retrieved via reflection. This function
// only works correctly when the type is present in the cmd package.
func getHandlerName(f http.HandlerFunc, cmdType string) string {
name := runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()

packageName := fmt.Sprintf("github.com/minio/minio/cmd.%s.", cmdType)
name = strings.TrimPrefix(name, packageName)
name = strings.TrimSuffix(name, "Handler-fm")
name = strings.TrimSuffix(name, "-fm")
return name
}
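
getHandlerName leans on the Go runtime's naming convention for method values, whose names end in "-fm". A standalone demonstration of the same trick, using a hypothetical handlers type in package main rather than MinIO's objectAPIHandlers:

package main

import (
	"fmt"
	"reflect"
	"runtime"
	"strings"
)

type handlers struct{}

func (h handlers) HeadObjectHandler() {}

func main() {
	h := handlers{}
	f := h.HeadObjectHandler
	// A method value's runtime name looks like "main.handlers.HeadObjectHandler-fm";
	// trimming the package/type prefix and the "-fm" suffix leaves the bare name
	// used for stats and tracing.
	name := runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
	name = strings.TrimPrefix(name, "main.handlers.")
	name = strings.TrimSuffix(name, "-fm")
	fmt.Println(name) // HeadObjectHandler
}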
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -41,7 +41,7 @@ import (
|
||||
xjwt "github.com/minio/minio/internal/jwt"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/minio/internal/mcontext"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
// Verify if request has JWT.
|
||||
@@ -126,7 +126,7 @@ func getRequestAuthType(r *http.Request) (at authType) {
|
||||
var err error
|
||||
r.Form, err = url.ParseQuery(r.URL.RawQuery)
|
||||
if err != nil {
|
||||
logger.LogIf(r.Context(), err)
|
||||
authNLogIf(r.Context(), err)
|
||||
return authTypeUnknown
|
||||
}
|
||||
}
|
||||
@@ -162,7 +162,8 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
|
||||
s3Err := ErrAccessDenied
|
||||
if _, ok := r.Header[xhttp.AmzContentSha256]; ok &&
|
||||
getRequestAuthType(r) == authTypeSigned {
|
||||
// We only support admin credentials to access admin APIs.
|
||||
|
||||
// Get credential information from the request.
|
||||
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
|
||||
if s3Err != ErrNone {
|
||||
return cred, owner, s3Err
|
||||
@@ -177,7 +178,7 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
|
||||
|
||||
logger.GetReqInfo(ctx).Cred = cred
|
||||
logger.GetReqInfo(ctx).Owner = owner
|
||||
logger.GetReqInfo(ctx).Region = globalSite.Region
|
||||
logger.GetReqInfo(ctx).Region = globalSite.Region()
|
||||
|
||||
return cred, owner, ErrNone
|
||||
}
|
||||
@@ -256,7 +257,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{},
|
||||
if err != nil {
|
||||
// Base64 decoding fails, we should log to indicate
|
||||
// something is malforming the request sent by client.
|
||||
logger.LogIf(GlobalContext, err, logger.Application)
|
||||
authNLogIf(GlobalContext, err, logger.ErrorKind)
|
||||
return nil, errAuthentication
|
||||
}
|
||||
claims.MapClaims[sessionPolicyNameExtracted] = string(spBytes)
|
||||
@@ -293,7 +294,21 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
|
||||
return nil, ErrInvalidToken
|
||||
}
|
||||
|
||||
// Expired credentials must return error right away.
|
||||
if cred.IsTemp() && cred.IsExpired() {
|
||||
return nil, toAPIErrorCode(r.Context(), errInvalidAccessKeyID)
|
||||
}
|
||||
secret := globalActiveCred.SecretKey
|
||||
if globalSiteReplicationSys.isEnabled() && cred.AccessKey != siteReplicatorSvcAcc {
|
||||
nsecret, err := getTokenSigningKey()
|
||||
if err != nil {
|
||||
return nil, toAPIErrorCode(r.Context(), err)
|
||||
}
|
||||
// sign root's temporary accounts also with site replicator creds
|
||||
if cred.ParentUser != globalActiveCred.AccessKey || cred.IsTemp() {
|
||||
secret = nsecret
|
||||
}
|
||||
}
|
||||
if cred.IsServiceAccount() {
|
||||
token = cred.SessionToken
|
||||
secret = cred.SecretKey
|
||||
@@ -338,7 +353,7 @@ func checkRequestAuthTypeWithVID(ctx context.Context, r *http.Request, action po
|
||||
|
||||
func authenticateRequest(ctx context.Context, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
|
||||
if logger.GetReqInfo(ctx) == nil {
|
||||
logger.LogIf(ctx, errors.New("unexpected context.Context does not have a logger.ReqInfo"), logger.Minio)
|
||||
bugLogIf(ctx, errors.New("unexpected context.Context does not have a logger.ReqInfo"), logger.ErrorKind)
|
||||
return ErrAccessDenied
|
||||
}
|
||||
|
||||
@@ -353,7 +368,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
|
||||
}
|
||||
cred, owner, s3Err = getReqAccessKeyV2(r)
|
||||
case authTypeSigned, authTypePresigned:
|
||||
region := globalSite.Region
|
||||
region := globalSite.Region()
|
||||
switch action {
|
||||
case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
|
||||
region = ""
|
||||
@@ -369,7 +384,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
|
||||
|
||||
logger.GetReqInfo(ctx).Cred = cred
|
||||
logger.GetReqInfo(ctx).Owner = owner
|
||||
logger.GetReqInfo(ctx).Region = globalSite.Region
|
||||
logger.GetReqInfo(ctx).Region = globalSite.Region()
|
||||
|
||||
// region is valid only for CreateBucketAction.
|
||||
var region string
|
||||
@@ -377,7 +392,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
|
||||
// To extract region from XML in request body, get copy of request body.
|
||||
payload, err := io.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err, logger.Application)
|
||||
authZLogIf(ctx, err, logger.ErrorKind)
|
||||
return ErrMalformedXML
|
||||
}
|
||||
|
||||
@@ -669,7 +684,7 @@ func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool,
|
||||
}
|
||||
cred, owner, s3Err = getReqAccessKeyV2(r)
|
||||
case authTypePresigned, authTypeSigned:
|
||||
region := globalSite.Region
|
||||
region := globalSite.Region()
|
||||
if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
|
||||
return cred, owner, s3Err
|
||||
}
|
||||
@@ -730,7 +745,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
|
||||
func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
|
||||
var cred auth.Credentials
|
||||
var owner bool
|
||||
region := globalSite.Region
|
||||
region := globalSite.Region()
|
||||
switch atype {
|
||||
case authTypeUnknown:
|
||||
return ErrSignatureVersionNotSupported
|
||||
|
||||
@@ -28,7 +28,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/internal/auth"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
type nullReader struct{}
|
||||
@@ -237,7 +237,7 @@ func TestIsRequestPresignedSignatureV2(t *testing.T) {
}
}

// TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature verision v4 detection.
// TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature version v4 detection.
func TestIsRequestPresignedSignatureV4(t *testing.T) {
testCases := []struct {
inputQueryKey string
@@ -287,7 +287,7 @@ func mustNewSignedRequest(method string, urlStr string, contentLength int64, bod
req := mustNewRequest(method, urlStr, contentLength, body, t)
cred := globalActiveCred
if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
t.Fatalf("Unable to inititalized new signed http request %s", err)
t.Fatalf("Unable to initialized new signed http request %s", err)
}
return req
}
@@ -298,7 +298,7 @@ func mustNewSignedV2Request(method string, urlStr string, contentLength int64, b
req := mustNewRequest(method, urlStr, contentLength, body, t)
cred := globalActiveCred
if err := signRequestV2(req, cred.AccessKey, cred.SecretKey); err != nil {
t.Fatalf("Unable to inititalized new signed http request %s", err)
t.Fatalf("Unable to initialized new signed http request %s", err)
}
return req
}
@@ -309,7 +309,7 @@ func mustNewPresignedV2Request(method string, urlStr string, contentLength int64
req := mustNewRequest(method, urlStr, contentLength, body, t)
cred := globalActiveCred
if err := preSignV2(req, cred.AccessKey, cred.SecretKey, time.Now().Add(10*time.Minute).Unix()); err != nil {
t.Fatalf("Unable to inititalized new signed http request %s", err)
t.Fatalf("Unable to initialized new signed http request %s", err)
}
return req
}
@@ -320,7 +320,7 @@ func mustNewPresignedRequest(method string, urlStr string, contentLength int64,
req := mustNewRequest(method, urlStr, contentLength, body, t)
cred := globalActiveCred
if err := preSignV4(req, cred.AccessKey, cred.SecretKey, time.Now().Add(10*time.Minute).Unix()); err != nil {
t.Fatalf("Unable to inititalized new signed http request %s", err)
t.Fatalf("Unable to initialized new signed http request %s", err)
}
return req
}
@@ -403,7 +403,7 @@ func TestIsReqAuthenticated(t *testing.T) {

// Validates all testcases.
for i, testCase := range testCases {
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region, serviceS3)
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region(), serviceS3)
if s3Error != testCase.s3Error {
if _, err := io.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
@@ -443,7 +443,7 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
{Request: mustNewPresignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
}
for i, testCase := range testCases {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region); s3Error != testCase.ErrCode {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region()); s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
}
}
@@ -491,7 +491,7 @@ func TestValidateAdminSignature(t *testing.T) {
for i, testCase := range testCases {
req := mustNewRequest(http.MethodGet, "http://localhost:9000/", 0, nil, t)
if err := signRequestV4(req, testCase.AccessKey, testCase.SecretKey); err != nil {
t.Fatalf("Unable to inititalized new signed http request %s", err)
t.Fatalf("Unable to initialized new signed http request %s", err)
}
_, _, s3Error := validateAdminSignature(ctx, req, globalMinioDefaultRegion)
if s3Error != testCase.ErrCode {

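For context on what the presign-detection tests above exercise: a SigV4-presigned request carries its credential material in the URL query string rather than in an Authorization header. A minimal, hypothetical sketch of the detection idea (not the test file's code; the helper name and URL values are illustrative):

package main

import (
	"fmt"
	"net/http"
)

// isPresignedV4 mimics the detector under test: SigV4-presigned URLs are
// recognizable by the X-Amz-Credential query parameter.
func isPresignedV4(r *http.Request) bool {
	_, ok := r.URL.Query()["X-Amz-Credential"]
	return ok
}

func main() {
	req, _ := http.NewRequest(http.MethodGet,
		"http://127.0.0.1:9000/bucket/object?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=EXAMPLEKEY%2F20240101%2Fus-east-1%2Fs3%2Faws4_request", nil)
	fmt.Println(isPresignedV4(req)) // true
}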
@@ -25,8 +25,7 @@ import (
"time"

"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)

// healTask represents what to heal along with options
@@ -101,17 +100,17 @@ func waitForLowHTTPReq() {
}

func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
bgSeq := newBgHealSequence()
// Run the background healer
globalBackgroundHealRoutine = newHealRoutine()
for i := 0; i < globalBackgroundHealRoutine.workers; i++ {
go globalBackgroundHealRoutine.AddWorker(ctx, objAPI)
go globalBackgroundHealRoutine.AddWorker(ctx, objAPI, bgSeq)
}

globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence(), objAPI)
globalBackgroundHealState.LaunchNewHealSequence(bgSeq, objAPI)
}

// Wait for heal requests and process them
func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer) {
func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer, bgSeq *healSequence) {
for {
select {
case task, ok := <-h.tasks:
@@ -136,8 +135,18 @@ func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer) {

if task.respCh != nil {
task.respCh <- healResult{result: res, err: err}
continue
}

// when respCh is not set caller is not waiting but we
// update the relevant metrics for them
if bgSeq != nil {
if err == nil {
bgSeq.countHealed(res.Type)
} else {
bgSeq.countFailed(res.Type)
}
}
case <-ctx.Done():
return
}
@@ -149,7 +158,7 @@ func newHealRoutine() *healRoutine {

if envHealWorkers := env.Get("_MINIO_HEAL_WORKERS", ""); envHealWorkers != "" {
if numHealers, err := strconv.Atoi(envHealWorkers); err != nil {
logger.LogIf(context.Background(), fmt.Errorf("invalid _MINIO_HEAL_WORKERS value: %w", err))
bugLogIf(context.Background(), fmt.Errorf("invalid _MINIO_HEAL_WORKERS value: %w", err))
} else {
workers = numHealers
}

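The hunk above threads the background heal sequence into each worker so that fire-and-forget tasks (those without a response channel) still feed the healed/failed counters. A generic, self-contained sketch of that pattern, with hypothetical task/result/counter types:

package main

import "sync/atomic"

type result struct {
	out string
	err error
}

type task struct {
	run    func() (string, error)
	respCh chan result // nil means the caller is not waiting
}

type counters struct{ healed, failed atomic.Uint64 }

// worker replies on respCh when a caller is waiting; otherwise it records
// the outcome in shared counters, mirroring the bgSeq accounting above.
func worker(tasks <-chan task, c *counters) {
	for t := range tasks {
		out, err := t.run()
		if t.respCh != nil {
			t.respCh <- result{out, err}
			continue
		}
		if err == nil {
			c.healed.Add(1)
		} else {
			c.failed.Add(1)
		}
	}
}

func main() {} // wiring tasks to workers omitted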
@@ -33,8 +33,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)

const (
@@ -87,6 +86,8 @@ type healingTracker struct {
// ID of the current healing operation
HealID string

ItemsSkipped uint64
BytesSkipped uint64
// Add future tracking capabilities
// Be sure that they are included in toHealingDisk
}
@@ -140,14 +141,14 @@ func initHealingTracker(disk StorageAPI, healID string) *healingTracker {
return h
}

func (h healingTracker) getLastUpdate() time.Time {
func (h *healingTracker) getLastUpdate() time.Time {
h.mu.RLock()
defer h.mu.RUnlock()

return h.LastUpdate
}

func (h healingTracker) getBucket() string {
func (h *healingTracker) getBucket() string {
h.mu.RLock()
defer h.mu.RUnlock()

@@ -161,7 +162,7 @@ func (h *healingTracker) setBucket(bucket string) {
h.Bucket = bucket
}

func (h healingTracker) getObject() string {
func (h *healingTracker) getObject() string {
h.mu.RLock()
defer h.mu.RUnlock()

@@ -175,14 +176,18 @@ func (h *healingTracker) setObject(object string) {
h.Object = object
}

func (h *healingTracker) updateProgress(success bool, bytes uint64) {
func (h *healingTracker) updateProgress(success, skipped bool, bytes uint64) {
h.mu.Lock()
defer h.mu.Unlock()

if success {
switch {
case success:
h.ItemsHealed++
h.BytesDone += bytes
} else {
case skipped:
h.ItemsSkipped++
h.BytesSkipped += bytes
default:
h.ItemsFailed++
h.BytesFailed += bytes
}
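With the new signature, exactly one of the three counter pairs advances per call. A call-site sketch (hypothetical tracker value and byte count):

tracker.updateProgress(true, false, 1<<20)  // healed: ItemsHealed++, BytesDone += 1 MiB
tracker.updateProgress(false, true, 1<<20)  // skipped: ItemsSkipped++, BytesSkipped += 1 MiB
tracker.updateProgress(false, false, 1<<20) // failed: ItemsFailed++, BytesFailed += 1 MiB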
@@ -232,7 +237,7 @@ func (h *healingTracker) delete(ctx context.Context) error {
pathJoin(bucketMetaPrefix, healingTrackerFilename),
DeleteOptions{
Recursive: false,
Force: false,
Immediate: false,
},
)
}
@@ -323,8 +328,10 @@ func (h *healingTracker) toHealingDisk() madmin.HealingDisk {
ObjectsTotalCount: h.ObjectsTotalCount,
ObjectsTotalSize: h.ObjectsTotalSize,
ItemsHealed: h.ItemsHealed,
ItemsSkipped: h.ItemsSkipped,
ItemsFailed: h.ItemsFailed,
BytesDone: h.BytesDone,
BytesSkipped: h.BytesSkipped,
BytesFailed: h.BytesFailed,
Bucket: h.Bucket,
Object: h.Object,
@@ -345,18 +352,17 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {

initBackgroundHealing(ctx, objAPI) // start quick background healing

globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)

if env.Get("_MINIO_AUTO_DRIVE_HEALING", config.EnableOn) == config.EnableOn || env.Get("_MINIO_AUTO_DISK_HEALING", config.EnableOn) == config.EnableOn {
globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)
go monitorLocalDisksAndHeal(ctx, z)
}
}

func getLocalDisksToHeal() (disksToHeal Endpoints) {
globalLocalDrivesMu.RLock()
globalLocalDrives := globalLocalDrives
localDrives := cloneDrives(globalLocalDrives)
globalLocalDrivesMu.RUnlock()
for _, disk := range globalLocalDrives {
for _, disk := range localDrives {
_, err := disk.GetDiskID()
if errors.Is(err, errUnformattedDisk) {
disksToHeal = append(disksToHeal, disk.Endpoint())
@@ -377,25 +383,10 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {
var newDiskHealingTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)

func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint) error {
disk, format, err := connectEndpoint(endpoint)
if err != nil {
return fmt.Errorf("Error: %w, %s", err, endpoint)
}
defer disk.Close()
poolIdx := globalEndpoints.GetLocalPoolIdx(disk.Endpoint())
if poolIdx < 0 {
return fmt.Errorf("unexpected pool index (%d) found for %s", poolIdx, disk.Endpoint())
}

// Calculate the set index where the current endpoint belongs
z.serverPools[poolIdx].erasureDisksMu.RLock()
setIdx, _, err := findDiskIndex(z.serverPools[poolIdx].format, format)
z.serverPools[poolIdx].erasureDisksMu.RUnlock()
if err != nil {
return err
}
if setIdx < 0 {
return fmt.Errorf("unexpected set index (%d) found for %s", setIdx, disk.Endpoint())
poolIdx, setIdx := endpoint.PoolIdx, endpoint.SetIdx
disk := getStorageViaEndpoint(endpoint)
if disk == nil {
return fmt.Errorf("Unexpected error disk must be initialized by now after formatting: %s", endpoint)
}

// Prevent parallel erasure set healing
@@ -417,11 +408,11 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
if errors.Is(err, errFileNotFound) {
return nil
}
logger.LogIf(ctx, fmt.Errorf("Unable to load healing tracker on '%s': %w, re-initializing..", disk, err))
healingLogIf(ctx, fmt.Errorf("Unable to load healing tracker on '%s': %w, re-initializing..", disk, err))
tracker = initHealingTracker(disk, mustGetUUID())
}

logger.Info(fmt.Sprintf("Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint))
healingLogEvent(ctx, "Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint)

buckets, _ := z.ListBuckets(ctx, BucketOptions{})
// Buckets data are dispersed in multiple pools/sets, make
@@ -441,10 +432,6 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
return buckets[i].Created.After(buckets[j].Created)
})

if serverDebugLog {
logger.Info("Healing drive '%v' on %s pool, belonging to %s erasure set", disk, humanize.Ordinal(poolIdx+1), humanize.Ordinal(setIdx+1))
}

// Load bucket totals
cache := dataUsageCache{}
if err := cache.load(ctx, z.serverPools[poolIdx].sets[setIdx], dataUsageCacheName); err == nil {
@@ -464,23 +451,15 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
return err
}

if tracker.ItemsFailed > 0 {
logger.Info("Healing of drive '%s' failed (healed: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsFailed)
} else {
logger.Info("Healing of drive '%s' complete (healed: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsFailed)
}

if len(tracker.QueuedBuckets) > 0 {
return fmt.Errorf("not all buckets were healed: %v", tracker.QueuedBuckets)
}
healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)

if serverDebugLog {
tracker.printTo(os.Stdout)
logger.Info("\n")
fmt.Printf("\n")
}

if tracker.HealID == "" { // HealID was empty only before Feb 2023
logger.LogIf(ctx, tracker.delete(ctx))
bugLogIf(ctx, tracker.delete(ctx))
return nil
}

@@ -491,10 +470,14 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
}

for _, disk := range disks {
if disk == nil {
continue
}

t, err := loadHealingTracker(ctx, disk)
if err != nil {
if !errors.Is(err, errFileNotFound) {
logger.LogIf(ctx, err)
healingLogIf(ctx, err)
}
continue
}
@@ -529,7 +512,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools) {
// Reformat disks immediately
_, err := z.HealFormat(context.Background(), false)
if err != nil && !errors.Is(err, errNoHealRequired) {
logger.LogIf(ctx, err)
healingLogIf(ctx, err)
// Reset for next interval.
diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
continue

@@ -188,6 +188,18 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "HealID")
return
}
case "ItemsSkipped":
z.ItemsSkipped, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ItemsSkipped")
return
}
case "BytesSkipped":
z.BytesSkipped, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BytesSkipped")
return
}
default:
err = dc.Skip()
if err != nil {
@@ -201,9 +213,9 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {

// EncodeMsg implements msgp.Encodable
func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 23
// map header, size 25
// write "ID"
err = en.Append(0xde, 0x0, 0x17, 0xa2, 0x49, 0x44)
err = en.Append(0xde, 0x0, 0x19, 0xa2, 0x49, 0x44)
if err != nil {
return
}
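Why 0x17 became 0x19: msgpack's map16 header is the marker byte 0xde followed by a big-endian uint16 entry count, so adding ItemsSkipped and BytesSkipped takes the healingTracker map from 23 (0x0017) to 25 (0x0019) fields. A small check, assuming encoding/binary is imported:

hdr := []byte{0xde, 0x00, 0x19}           // the map16 header emitted above
count := binary.BigEndian.Uint16(hdr[1:]) // count == 25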
@@ -446,15 +458,35 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "HealID")
return
}
// write "ItemsSkipped"
err = en.Append(0xac, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ItemsSkipped)
if err != nil {
err = msgp.WrapError(err, "ItemsSkipped")
return
}
// write "BytesSkipped"
err = en.Append(0xac, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.BytesSkipped)
if err != nil {
err = msgp.WrapError(err, "BytesSkipped")
return
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 23
// map header, size 25
// string "ID"
o = append(o, 0xde, 0x0, 0x17, 0xa2, 0x49, 0x44)
o = append(o, 0xde, 0x0, 0x19, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
// string "PoolIndex"
o = append(o, 0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
@@ -528,6 +560,12 @@ func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
// string "HealID"
o = append(o, 0xa6, 0x48, 0x65, 0x61, 0x6c, 0x49, 0x44)
o = msgp.AppendString(o, z.HealID)
// string "ItemsSkipped"
o = append(o, 0xac, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ItemsSkipped)
// string "BytesSkipped"
o = append(o, 0xac, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
o = msgp.AppendUint64(o, z.BytesSkipped)
return
}

@@ -713,6 +751,18 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "HealID")
return
}
case "ItemsSkipped":
z.ItemsSkipped, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ItemsSkipped")
return
}
case "BytesSkipped":
z.BytesSkipped, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesSkipped")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
@@ -735,6 +785,6 @@ func (z *healingTracker) Msgsize() (s int) {
for za0002 := range z.HealedBuckets {
s += msgp.StringPrefixSize + len(z.HealedBuckets[za0002])
}
s += 7 + msgp.StringPrefixSize + len(z.HealID)
s += 7 + msgp.StringPrefixSize + len(z.HealID) + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size
return
}

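The "+ 13 + msgp.Uint64Size" terms account for each new field's encoded name plus its value: "ItemsSkipped" and "BytesSkipped" are 12 bytes each, and the one-byte fixstr header brings each name to 13 bytes. A sketch of the arithmetic:

perField := 1 + len("ItemsSkipped") + msgp.Uint64Size // == 13 + msgp.Uint64Size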
758 cmd/batch-expire.go Normal file
@@ -0,0 +1,758 @@
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"runtime"
"strconv"
"time"

"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/bucket/versioning"
xhttp "github.com/minio/minio/internal/http"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/wildcard"
"github.com/minio/pkg/v3/workers"
"gopkg.in/yaml.v3"
)

// expire: # Expire objects that match a condition
// apiVersion: v1
// bucket: mybucket # Bucket where this batch job will expire matching objects from
// prefix: myprefix # (Optional) Prefix under which this job will expire objects matching the rules below.
// rules:
// - type: object # regular objects with zero or more older versions
// name: NAME # match object names that satisfy the wildcard expression.
// olderThan: 70h # match objects older than this value
// createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
// tags:
// - key: name
// value: pick* # match objects with tag 'name', all values starting with 'pick'
// metadata:
// - key: content-type
// value: image/* # match objects with 'content-type', all values starting with 'image/'
// size:
// lessThan: "10MiB" # match objects with size less than this value (e.g. 10MiB)
// greaterThan: 1MiB # match objects with size greater than this value (e.g. 1MiB)
// purge:
// # retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
// # retainVersions: 5 # keep the latest 5 versions of the object.
//
// - type: deleted # objects with delete marker as their latest version
// name: NAME # match object names that satisfy the wildcard expression.
// olderThan: 10h # match objects older than this value (e.g. 7d10h31s)
// createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
// purge:
// # retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
// # retainVersions: 5 # keep the latest 5 versions of the object including delete markers.
//
// notify:
// endpoint: https://notify.endpoint # notification endpoint to receive job completion status
// token: Bearer xxxxx # optional authentication token for the notification endpoint
//
// retry:
// attempts: 10 # number of retries for the job before giving up
// delay: 500ms # least amount of delay between each retry

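A minimal decoding sketch for the format documented above, using the yaml.v3 dependency from the import block. This is not the server's actual job-submission path; doc, ctx, job and objAPI are assumed to be supplied by the caller:

var req struct {
	Expire BatchJobExpire `yaml:"expire"`
}
if err := yaml.Unmarshal(doc, &req); err != nil {
	return err
}
// Validate catches a bad apiVersion, unknown rule types, rule count, etc.
if err := req.Expire.Validate(ctx, job, objAPI); err != nil {
	return err
}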
//go:generate msgp -file $GOFILE

// BatchJobExpirePurge type accepts non-negative versions to be retained
type BatchJobExpirePurge struct {
line, col int
RetainVersions int `yaml:"retainVersions" json:"retainVersions"`
}

var _ yaml.Unmarshaler = &BatchJobExpirePurge{}

// UnmarshalYAML - BatchJobExpirePurge extends unmarshal to extract line, col
func (p *BatchJobExpirePurge) UnmarshalYAML(val *yaml.Node) error {
type purge BatchJobExpirePurge
var tmp purge
err := val.Decode(&tmp)
if err != nil {
return err
}

*p = BatchJobExpirePurge(tmp)
p.line, p.col = val.Line, val.Column
return nil
}

// Validate returns nil if the value is valid, i.e. >= 0.
func (p BatchJobExpirePurge) Validate() error {
if p.RetainVersions < 0 {
return BatchJobYamlErr{
line: p.line,
col: p.col,
msg: "retainVersions must be >= 0",
}
}
return nil
}

// BatchJobExpireFilter holds all the filters currently supported for batch expiration
type BatchJobExpireFilter struct {
line, col int
OlderThan time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
CreatedBefore *time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"`
Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"`
Size BatchJobSizeFilter `yaml:"size" json:"size"`
Type string `yaml:"type" json:"type"`
Name string `yaml:"name" json:"name"`
Purge BatchJobExpirePurge `yaml:"purge" json:"purge"`
}

var _ yaml.Unmarshaler = &BatchJobExpireFilter{}

// UnmarshalYAML - BatchJobExpireFilter extends unmarshal to extract line, col
// information
func (ef *BatchJobExpireFilter) UnmarshalYAML(value *yaml.Node) error {
type expFilter BatchJobExpireFilter
var tmp expFilter
err := value.Decode(&tmp)
if err != nil {
return err
}
*ef = BatchJobExpireFilter(tmp)
ef.line, ef.col = value.Line, value.Column
return err
}

// Matches returns true if obj matches the filter conditions specified in ef.
func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool {
switch ef.Type {
case BatchJobExpireObject:
if obj.DeleteMarker {
return false
}
case BatchJobExpireDeleted:
if !obj.DeleteMarker {
return false
}
default:
// we should never come here, Validate should have caught this.
batchLogOnceIf(context.Background(), fmt.Errorf("invalid filter type: %s", ef.Type), ef.Type)
return false
}

if len(ef.Name) > 0 && !wildcard.Match(ef.Name, obj.Name) {
return false
}
if ef.OlderThan > 0 && now.Sub(obj.ModTime) <= ef.OlderThan {
return false
}

if ef.CreatedBefore != nil && !obj.ModTime.Before(*ef.CreatedBefore) {
return false
}

if len(ef.Tags) > 0 && !obj.DeleteMarker {
// Only parse object tags if tags filter is specified.
var tagMap map[string]string
if len(obj.UserTags) != 0 {
t, err := tags.ParseObjectTags(obj.UserTags)
if err != nil {
return false
}
tagMap = t.ToMap()
}

for _, kv := range ef.Tags {
// Object (version) must match all tags specified in
// the filter
var match bool
for t, v := range tagMap {
if kv.Match(BatchJobKV{Key: t, Value: v}) {
match = true
}
}
if !match {
return false
}
}

}
if len(ef.Metadata) > 0 && !obj.DeleteMarker {
for _, kv := range ef.Metadata {
// Object (version) must match all x-amz-meta and
// standard metadata headers
// specified in the filter
var match bool
for k, v := range obj.UserDefined {
if !stringsHasPrefixFold(k, "x-amz-meta-") && !isStandardHeader(k) {
continue
}
// We only need to match x-amz-meta or standardHeaders
if kv.Match(BatchJobKV{Key: k, Value: v}) {
match = true
}
}
if !match {
return false
}
}
}

return ef.Size.InRange(obj.Size)
}

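A usage sketch (hypothetical values): a rule of type "object" that matches regular objects named under logs/ once they are more than 70 hours old; obj is an ObjectInfo produced by the bucket walk:

rule := BatchJobExpireFilter{
	Type:      BatchJobExpireObject,
	Name:      "logs/*",
	OlderThan: 70 * time.Hour,
}
expired := rule.Matches(obj, time.Now().UTC()) // true once obj.ModTime is older than 70h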
const (
// BatchJobExpireObject - object type
BatchJobExpireObject string = "object"
// BatchJobExpireDeleted - delete marker type
BatchJobExpireDeleted string = "deleted"
)

// Validate returns nil if ef has valid fields, validation error otherwise.
func (ef BatchJobExpireFilter) Validate() error {
switch ef.Type {
case BatchJobExpireObject:
case BatchJobExpireDeleted:
if len(ef.Tags) > 0 || len(ef.Metadata) > 0 {
return BatchJobYamlErr{
line: ef.line,
col: ef.col,
msg: "delete type filter can't have tags or metadata",
}
}
default:
return BatchJobYamlErr{
line: ef.line,
col: ef.col,
msg: "invalid batch-expire type",
}
}

for _, tag := range ef.Tags {
if err := tag.Validate(); err != nil {
return err
}
}

for _, meta := range ef.Metadata {
if err := meta.Validate(); err != nil {
return err
}
}
if err := ef.Purge.Validate(); err != nil {
return err
}
if err := ef.Size.Validate(); err != nil {
return err
}
if ef.CreatedBefore != nil && !ef.CreatedBefore.Before(time.Now()) {
return BatchJobYamlErr{
line: ef.line,
col: ef.col,
msg: "CreatedBefore is in the future",
}
}
return nil
}

// BatchJobExpire represents configuration parameters for a batch expiration
// job typically supplied in yaml form
type BatchJobExpire struct {
line, col int
APIVersion string `yaml:"apiVersion" json:"apiVersion"`
Bucket string `yaml:"bucket" json:"bucket"`
Prefix string `yaml:"prefix" json:"prefix"`
NotificationCfg BatchJobNotification `yaml:"notify" json:"notify"`
Retry BatchJobRetry `yaml:"retry" json:"retry"`
Rules []BatchJobExpireFilter `yaml:"rules" json:"rules"`
}

var _ yaml.Unmarshaler = &BatchJobExpire{}

// UnmarshalYAML - BatchJobExpire extends default unmarshal to extract line, col information.
func (r *BatchJobExpire) UnmarshalYAML(val *yaml.Node) error {
type expireJob BatchJobExpire
var tmp expireJob
err := val.Decode(&tmp)
if err != nil {
return err
}

*r = BatchJobExpire(tmp)
r.line, r.col = val.Line, val.Column
return nil
}

// Notify notifies notification endpoint if configured regarding job failure or success.
func (r BatchJobExpire) Notify(ctx context.Context, body io.Reader) error {
if r.NotificationCfg.Endpoint == "" {
return nil
}

ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()

req, err := http.NewRequestWithContext(ctx, http.MethodPost, r.NotificationCfg.Endpoint, body)
if err != nil {
return err
}

if r.NotificationCfg.Token != "" {
req.Header.Set("Authorization", r.NotificationCfg.Token)
}

clnt := http.Client{Transport: getRemoteInstanceTransport()}
resp, err := clnt.Do(req)
if err != nil {
return err
}

xhttp.DrainBody(resp.Body)
if resp.StatusCode != http.StatusOK {
return errors.New(resp.Status)
}

return nil
}

// Expire expires object versions which have already matched supplied filter conditions
func (r *BatchJobExpire) Expire(ctx context.Context, api ObjectLayer, vc *versioning.Versioning, objsToDel []ObjectToDelete) []error {
opts := ObjectOptions{
PrefixEnabledFn: vc.PrefixEnabled,
VersionSuspended: vc.Suspended(),
}
_, errs := api.DeleteObjects(ctx, r.Bucket, objsToDel, opts)
return errs
}

const (
batchExpireName = "batch-expire.bin"
batchExpireFormat = 1
batchExpireVersionV1 = 1
batchExpireVersion = batchExpireVersionV1
batchExpireAPIVersion = "v1"
batchExpireJobDefaultRetries = 3
batchExpireJobDefaultRetryDelay = 250 * time.Millisecond
)

type objInfoCache map[string]*ObjectInfo

func newObjInfoCache() objInfoCache {
return objInfoCache(make(map[string]*ObjectInfo))
}

func (oiCache objInfoCache) Add(toDel ObjectToDelete, oi *ObjectInfo) {
oiCache[fmt.Sprintf("%s-%s", toDel.ObjectName, toDel.VersionID)] = oi
}

func (oiCache objInfoCache) Get(toDel ObjectToDelete) (*ObjectInfo, bool) {
oi, ok := oiCache[fmt.Sprintf("%s-%s", toDel.ObjectName, toDel.VersionID)]
return oi, ok
}

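Usage sketch: entries are keyed by "<object name>-<version id>", so Add and Get must be called with the same ObjectToDelete value (the values below are hypothetical):

oiCache := newObjInfoCache()
od := ObjectToDelete{ObjectV: ObjectV{ObjectName: "a.txt", VersionID: "v1"}}
oi := ObjectInfo{Name: "a.txt", VersionID: "v1"}
oiCache.Add(od, &oi)
if cached, ok := oiCache.Get(od); ok {
	_ = cached // used below to attribute success/failure to the right version
}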
func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo, job BatchJobRequest, api ObjectLayer, wk *workers.Workers, expireCh <-chan []expireObjInfo) {
vc, _ := globalBucketVersioningSys.Get(r.Bucket)
retryAttempts := r.Retry.Attempts
delay := job.Expire.Retry.Delay
if delay == 0 {
delay = batchExpireJobDefaultRetryDelay
}

var i int
for toExpire := range expireCh {
select {
case <-ctx.Done():
return
default:
}
if i > 0 {
if wait := globalBatchConfig.ExpirationWait(); wait > 0 {
time.Sleep(wait)
}
}
i++
wk.Take()
go func(toExpire []expireObjInfo) {
defer wk.Give()

toExpireAll := make([]ObjectInfo, 0, len(toExpire))
toDel := make([]ObjectToDelete, 0, len(toExpire))
oiCache := newObjInfoCache()
for _, exp := range toExpire {
if exp.ExpireAll {
toExpireAll = append(toExpireAll, exp.ObjectInfo)
continue
}
// Cache ObjectInfo value via pointers for
// subsequent use to track objects which
// couldn't be deleted.
od := ObjectToDelete{
ObjectV: ObjectV{
ObjectName: exp.Name,
VersionID: exp.VersionID,
},
}
toDel = append(toDel, od)
oiCache.Add(od, &exp.ObjectInfo)
}

var done bool
// DeleteObject(deletePrefix: true) to expire all versions of an object
for _, exp := range toExpireAll {
var success bool
for attempts := 1; attempts <= retryAttempts; attempts++ {
select {
case <-ctx.Done():
done = true
default:
}
stopFn := globalBatchJobsMetrics.trace(batchJobMetricExpire, ri.JobID, attempts)
_, err := api.DeleteObject(ctx, exp.Bucket, encodeDirObject(exp.Name), ObjectOptions{
DeletePrefix: true,
DeletePrefixObject: true, // use prefix delete on exact object (this is an optimization to avoid fan-out calls)
})
if err != nil {
stopFn(exp, err)
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s due to %v (attempts=%d)", exp.Bucket, exp.Name, err, attempts))
} else {
stopFn(exp, err)
success = true
break
}
}
ri.trackMultipleObjectVersions(r.Bucket, exp, success)
if done {
break
}
}

if done {
return
}

// DeleteMultiple objects
toDelCopy := make([]ObjectToDelete, len(toDel))
for attempts := 1; attempts <= retryAttempts; attempts++ {
select {
case <-ctx.Done():
return
default:
}

stopFn := globalBatchJobsMetrics.trace(batchJobMetricExpire, ri.JobID, attempts)
// Copying toDel to select from objects whose
// deletion failed
copy(toDelCopy, toDel)
var failed int
errs := r.Expire(ctx, api, vc, toDel)
// reslice toDel in preparation for next retry attempt
toDel = toDel[:0]
for i, err := range errs {
if err != nil {
stopFn(toDelCopy[i], err)
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID,
err, attempts))
failed++
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
ri.trackCurrentBucketObject(r.Bucket, *oi, false, attempts)
}
if attempts != retryAttempts {
// retry
toDel = append(toDel, toDelCopy[i])
}
} else {
stopFn(toDelCopy[i], nil)
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
ri.trackCurrentBucketObject(r.Bucket, *oi, true, attempts)
}
}
}

globalBatchJobsMetrics.save(ri.JobID, ri)

if failed == 0 {
break
}

// Add a delay between retry attempts
if attempts < retryAttempts {
time.Sleep(delay)
}
}
}(toExpire)
}
}

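The retry loop above keeps a stable copy (toDelCopy) so errors can be attributed to the right object, then reslices the working set down to just the failures. A self-contained sketch of the same shape, with hypothetical deleteBatch, maxAttempts and delay:

pending := append([]string(nil), objects...)
scratch := make([]string, len(pending))
for attempt := 1; attempt <= maxAttempts; attempt++ {
	scratch = scratch[:len(pending)]
	copy(scratch, pending) // stable copy for error attribution
	errs := deleteBatch(scratch)
	pending = pending[:0] // refill with failures only
	for i, err := range errs {
		if err != nil && attempt != maxAttempts {
			pending = append(pending, scratch[i])
		}
	}
	if len(pending) == 0 {
		break
	}
	time.Sleep(delay) // back off before the next attempt
}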
type expireObjInfo struct {
ObjectInfo
ExpireAll bool
}

// Start the batch expiration job, resumes if there was a pending job via "job.ID"
func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
ri := &batchJobInfo{
JobID: job.ID,
JobType: string(job.Type()),
StartTime: job.Started,
}
if err := ri.load(ctx, api, job); err != nil {
return err
}

globalBatchJobsMetrics.save(job.ID, ri)
lastObject := ri.Object

now := time.Now().UTC()

workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_EXPIRATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
if err != nil {
return err
}

wk, err := workers.New(workerSize)
if err != nil {
// invalid worker size.
return err
}

ctx, cancel := context.WithCancel(ctx)
defer cancel()

results := make(chan itemOrErr[ObjectInfo], workerSize)
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
Marker: lastObject,
LatestOnly: false, // we need to visit all versions of the object to implement purge: retainVersions
VersionsSort: WalkVersionsSortDesc,
}); err != nil {
// Do not need to retry if we can't list objects on source.
return err
}

// Goroutine to periodically save batch-expire job's in-memory state
saverQuitCh := make(chan struct{})
go func() {
saveTicker := time.NewTicker(10 * time.Second)
defer saveTicker.Stop()
for {
select {
case <-saveTicker.C:
// persist in-memory state to disk after every 10secs.
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))

case <-ctx.Done():
// persist in-memory state immediately before exiting due to context cancellation.
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
return

case <-saverQuitCh:
// persist in-memory state immediately to disk.
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
return
}
}
}()

expireCh := make(chan []expireObjInfo, workerSize)
expireDoneCh := make(chan struct{})
go func() {
defer close(expireDoneCh)
batchObjsForDelete(ctx, r, ri, job, api, wk, expireCh)
}()

var (
prevObj ObjectInfo
matchedFilter BatchJobExpireFilter
versionsCount int
toDel []expireObjInfo
)
failed := false
for result := range results {
if result.Err != nil {
failed = true
batchLogIf(ctx, result.Err)
continue
}

// Apply filter to find the matching rule to apply expiry
// actions accordingly.
// nolint:gocritic
if result.Item.IsLatest {
// send down filtered entries to be deleted using
// DeleteObjects method
if len(toDel) > 10 { // batch up to 10 objects/versions to be expired simultaneously.
xfer := make([]expireObjInfo, len(toDel))
copy(xfer, toDel)

var done bool
select {
case <-ctx.Done():
done = true
case expireCh <- xfer:
toDel = toDel[:0] // resetting toDel
}
if done {
break
}
}
var match BatchJobExpireFilter
var found bool
for _, rule := range r.Rules {
if rule.Matches(result.Item, now) {
match = rule
found = true
break
}
}
if !found {
continue
}

prevObj = result.Item
matchedFilter = match
versionsCount = 1
// Include the latest version
if matchedFilter.Purge.RetainVersions == 0 {
toDel = append(toDel, expireObjInfo{
ObjectInfo: result.Item,
ExpireAll: true,
})
continue
}
} else if prevObj.Name == result.Item.Name {
if matchedFilter.Purge.RetainVersions == 0 {
continue // including latest version in toDel suffices, skipping other versions
}
versionsCount++
} else {
continue
}

if versionsCount <= matchedFilter.Purge.RetainVersions {
continue // retain versions
}
toDel = append(toDel, expireObjInfo{
ObjectInfo: result.Item,
})
}
// Send any remaining objects downstream
if len(toDel) > 0 {
select {
case <-ctx.Done():
case expireCh <- toDel:
}
}
xioutil.SafeClose(expireCh)

<-expireDoneCh // waits for the expire goroutine to complete
wk.Wait() // waits for all expire workers to retire

ri.Complete = !failed && ri.ObjectsFailed == 0
ri.Failed = failed || ri.ObjectsFailed > 0
globalBatchJobsMetrics.save(job.ID, ri)

// Close the saverQuitCh - this also triggers saving in-memory state
// immediately one last time before we exit this method.
xioutil.SafeClose(saverQuitCh)

// Notify expire jobs final status to the configured endpoint
buf, _ := json.Marshal(ri)
if err := r.Notify(context.Background(), bytes.NewReader(buf)); err != nil {
batchLogIf(context.Background(), fmt.Errorf("unable to notify %v", err))
}

return nil
}

//msgp:ignore batchExpireJobError
type batchExpireJobError struct {
Code string
Description string
HTTPStatusCode int
}

func (e batchExpireJobError) Error() string {
return e.Description
}

// maxBatchRules maximum number of rules a batch-expiry job supports
const maxBatchRules = 50

// Validate validates the job definition input
func (r *BatchJobExpire) Validate(ctx context.Context, job BatchJobRequest, o ObjectLayer) error {
if r == nil {
return nil
}

if r.APIVersion != batchExpireAPIVersion {
return batchExpireJobError{
Code: "InvalidArgument",
Description: "Unsupported batch expire API version",
HTTPStatusCode: http.StatusBadRequest,
}
}

if r.Bucket == "" {
return batchExpireJobError{
Code: "InvalidArgument",
Description: "Bucket argument missing",
HTTPStatusCode: http.StatusBadRequest,
}
}

if _, err := o.GetBucketInfo(ctx, r.Bucket, BucketOptions{}); err != nil {
if isErrBucketNotFound(err) {
return batchExpireJobError{
Code: "NoSuchSourceBucket",
Description: "The specified source bucket does not exist",
HTTPStatusCode: http.StatusNotFound,
}
}
return err
}

if len(r.Rules) > maxBatchRules {
return batchExpireJobError{
Code: "InvalidArgument",
Description: "Too many rules. Batch expire job can't have more than 50 rules",
HTTPStatusCode: http.StatusBadRequest,
}
}

for _, rule := range r.Rules {
if err := rule.Validate(); err != nil {
return batchExpireJobError{
Code: "InvalidArgument",
Description: fmt.Sprintf("Invalid batch expire rule: %s", err),
HTTPStatusCode: http.StatusBadRequest,
}
}
}

if err := r.Retry.Validate(); err != nil {
return batchExpireJobError{
Code: "InvalidArgument",
Description: fmt.Sprintf("Invalid batch expire retry configuration: %s", err),
HTTPStatusCode: http.StatusBadRequest,
}
}
return nil
}
856 cmd/batch-expire_gen.go Normal file
@@ -0,0 +1,856 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
"time"

"github.com/tinylib/msgp/msgp"
)

// DecodeMsg implements msgp.Decodable
func (z *BatchJobExpire) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "APIVersion":
z.APIVersion, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "APIVersion")
return
}
case "Bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Prefix":
z.Prefix, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "NotificationCfg":
err = z.NotificationCfg.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "NotificationCfg")
return
}
case "Retry":
err = z.Retry.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Retry")
return
}
case "Rules":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Rules")
return
}
if cap(z.Rules) >= int(zb0002) {
z.Rules = (z.Rules)[:zb0002]
} else {
z.Rules = make([]BatchJobExpireFilter, zb0002)
}
for za0001 := range z.Rules {
err = z.Rules[za0001].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Rules", za0001)
return
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}

// EncodeMsg implements msgp.Encodable
func (z *BatchJobExpire) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 6
// write "APIVersion"
err = en.Append(0x86, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.APIVersion)
if err != nil {
err = msgp.WrapError(err, "APIVersion")
return
}
// write "Bucket"
err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "Prefix"
err = en.Append(0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
if err != nil {
return
}
err = en.WriteString(z.Prefix)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
// write "NotificationCfg"
err = en.Append(0xaf, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x66, 0x67)
if err != nil {
return
}
err = z.NotificationCfg.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "NotificationCfg")
return
}
// write "Retry"
err = en.Append(0xa5, 0x52, 0x65, 0x74, 0x72, 0x79)
if err != nil {
return
}
err = z.Retry.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Retry")
return
}
// write "Rules"
err = en.Append(0xa5, 0x52, 0x75, 0x6c, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Rules)))
if err != nil {
err = msgp.WrapError(err, "Rules")
return
}
for za0001 := range z.Rules {
err = z.Rules[za0001].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Rules", za0001)
return
}
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z *BatchJobExpire) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 6
// string "APIVersion"
o = append(o, 0x86, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
o = msgp.AppendString(o, z.APIVersion)
// string "Bucket"
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "Prefix"
o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
o = msgp.AppendString(o, z.Prefix)
// string "NotificationCfg"
o = append(o, 0xaf, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x66, 0x67)
o, err = z.NotificationCfg.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "NotificationCfg")
return
}
// string "Retry"
o = append(o, 0xa5, 0x52, 0x65, 0x74, 0x72, 0x79)
o, err = z.Retry.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Retry")
return
}
// string "Rules"
o = append(o, 0xa5, 0x52, 0x75, 0x6c, 0x65, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Rules)))
for za0001 := range z.Rules {
o, err = z.Rules[za0001].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Rules", za0001)
return
}
}
return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *BatchJobExpire) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "APIVersion":
z.APIVersion, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "APIVersion")
return
}
case "Bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Prefix":
z.Prefix, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "NotificationCfg":
bts, err = z.NotificationCfg.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "NotificationCfg")
return
}
case "Retry":
bts, err = z.Retry.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Retry")
return
}
case "Rules":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Rules")
return
}
if cap(z.Rules) >= int(zb0002) {
z.Rules = (z.Rules)[:zb0002]
} else {
z.Rules = make([]BatchJobExpireFilter, zb0002)
}
for za0001 := range z.Rules {
bts, err = z.Rules[za0001].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Rules", za0001)
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobExpire) Msgsize() (s int) {
s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 16 + z.NotificationCfg.Msgsize() + 6 + z.Retry.Msgsize() + 6 + msgp.ArrayHeaderSize
for za0001 := range z.Rules {
s += z.Rules[za0001].Msgsize()
}
return
}

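A round-trip sketch using the generated codec (job is assumed to be a populated BatchJobExpire; error handling trimmed to the essentials):

buf, err := job.MarshalMsg(nil)
if err != nil {
	return err
}
var decoded BatchJobExpire
if _, err := decoded.UnmarshalMsg(buf); err != nil {
	return err
}
// decoded now mirrors job's exported fields; the unexported line/col
// hints captured during YAML parsing are not part of the wire format.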
// DecodeMsg implements msgp.Decodable
func (z *BatchJobExpireFilter) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "OlderThan":
z.OlderThan, err = dc.ReadDuration()
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
}
case "CreatedBefore":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "CreatedBefore")
return
}
z.CreatedBefore = nil
} else {
if z.CreatedBefore == nil {
z.CreatedBefore = new(time.Time)
}
*z.CreatedBefore, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "CreatedBefore")
return
}
}
case "Tags":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Tags")
return
}
if cap(z.Tags) >= int(zb0002) {
z.Tags = (z.Tags)[:zb0002]
} else {
z.Tags = make([]BatchJobKV, zb0002)
}
for za0001 := range z.Tags {
err = z.Tags[za0001].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Tags", za0001)
return
}
}
case "Metadata":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Metadata")
return
}
if cap(z.Metadata) >= int(zb0003) {
z.Metadata = (z.Metadata)[:zb0003]
} else {
z.Metadata = make([]BatchJobKV, zb0003)
}
for za0002 := range z.Metadata {
err = z.Metadata[za0002].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Metadata", za0002)
return
}
}
case "Size":
err = z.Size.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
case "Type":
z.Type, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
case "Name":
z.Name, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "Purge":
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Purge")
return
}
for zb0004 > 0 {
zb0004--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "Purge")
return
}
switch msgp.UnsafeString(field) {
case "RetainVersions":
z.Purge.RetainVersions, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Purge", "RetainVersions")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "Purge")
return
}
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}

// EncodeMsg implements msgp.Encodable
func (z *BatchJobExpireFilter) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 8
// write "OlderThan"
err = en.Append(0x88, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
if err != nil {
return
}
err = en.WriteDuration(z.OlderThan)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
}
// write "CreatedBefore"
err = en.Append(0xad, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65)
if err != nil {
return
}
if z.CreatedBefore == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = en.WriteTime(*z.CreatedBefore)
if err != nil {
err = msgp.WrapError(err, "CreatedBefore")
return
}
}
// write "Tags"
err = en.Append(0xa4, 0x54, 0x61, 0x67, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Tags)))
if err != nil {
err = msgp.WrapError(err, "Tags")
return
}
for za0001 := range z.Tags {
err = z.Tags[za0001].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Tags", za0001)
return
}
}
// write "Metadata"
err = en.Append(0xa8, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Metadata)))
if err != nil {
err = msgp.WrapError(err, "Metadata")
return
}
for za0002 := range z.Metadata {
err = z.Metadata[za0002].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Metadata", za0002)
return
}
}
// write "Size"
err = en.Append(0xa4, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = z.Size.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
// write "Type"
err = en.Append(0xa4, 0x54, 0x79, 0x70, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Type)
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
// write "Name"
err = en.Append(0xa4, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Name)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
// write "Purge"
err = en.Append(0xa5, 0x50, 0x75, 0x72, 0x67, 0x65)
if err != nil {
return
}
// map header, size 1
// write "RetainVersions"
err = en.Append(0x81, 0xae, 0x52, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.Purge.RetainVersions)
if err != nil {
err = msgp.WrapError(err, "Purge", "RetainVersions")
return
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z *BatchJobExpireFilter) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 8
// string "OlderThan"
o = append(o, 0x88, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
o = msgp.AppendDuration(o, z.OlderThan)
// string "CreatedBefore"
o = append(o, 0xad, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65)
if z.CreatedBefore == nil {
o = msgp.AppendNil(o)
} else {
o = msgp.AppendTime(o, *z.CreatedBefore)
}
// string "Tags"
o = append(o, 0xa4, 0x54, 0x61, 0x67, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Tags)))
for za0001 := range z.Tags {
o, err = z.Tags[za0001].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Tags", za0001)
return
}
}
// string "Metadata"
o = append(o, 0xa8, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61)
o = msgp.AppendArrayHeader(o, uint32(len(z.Metadata)))
for za0002 := range z.Metadata {
o, err = z.Metadata[za0002].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Metadata", za0002)
return
}
}
// string "Size"
o = append(o, 0xa4, 0x53, 0x69, 0x7a, 0x65)
o, err = z.Size.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
// string "Type"
o = append(o, 0xa4, 0x54, 0x79, 0x70, 0x65)
o = msgp.AppendString(o, z.Type)
// string "Name"
o = append(o, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name)
// string "Purge"
o = append(o, 0xa5, 0x50, 0x75, 0x72, 0x67, 0x65)
// map header, size 1
// string "RetainVersions"
o = append(o, 0x81, 0xae, 0x52, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
o = msgp.AppendInt(o, z.Purge.RetainVersions)
return
}

// UnmarshalMsg implements msgp.Unmarshaler
|
||||
func (z *BatchJobExpireFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, bts, err = msgp.ReadMapKeyZC(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "OlderThan":
|
||||
z.OlderThan, bts, err = msgp.ReadDurationBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "OlderThan")
|
||||
return
|
||||
}
|
||||
case "CreatedBefore":
|
||||
if msgp.IsNil(bts) {
|
||||
bts, err = msgp.ReadNilBytes(bts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
z.CreatedBefore = nil
|
||||
} else {
|
||||
if z.CreatedBefore == nil {
|
||||
z.CreatedBefore = new(time.Time)
|
||||
}
|
||||
*z.CreatedBefore, bts, err = msgp.ReadTimeBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "CreatedBefore")
|
||||
return
|
||||
}
|
||||
}
|
||||
case "Tags":
|
||||
var zb0002 uint32
|
||||
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Tags")
|
||||
return
|
||||
}
|
||||
if cap(z.Tags) >= int(zb0002) {
|
||||
z.Tags = (z.Tags)[:zb0002]
|
||||
} else {
|
||||
z.Tags = make([]BatchJobKV, zb0002)
|
||||
}
|
||||
for za0001 := range z.Tags {
|
||||
bts, err = z.Tags[za0001].UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Tags", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
case "Metadata":
|
||||
var zb0003 uint32
|
||||
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Metadata")
|
||||
return
|
||||
}
|
||||
if cap(z.Metadata) >= int(zb0003) {
|
||||
z.Metadata = (z.Metadata)[:zb0003]
|
||||
} else {
|
||||
z.Metadata = make([]BatchJobKV, zb0003)
|
||||
}
|
||||
for za0002 := range z.Metadata {
|
||||
bts, err = z.Metadata[za0002].UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Metadata", za0002)
|
||||
return
|
||||
}
|
||||
}
|
||||
case "Size":
|
||||
bts, err = z.Size.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Size")
|
||||
return
|
||||
}
|
||||
case "Type":
|
||||
z.Type, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Type")
|
||||
return
|
||||
}
|
||||
case "Name":
|
||||
z.Name, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Name")
|
||||
return
|
||||
}
|
||||
case "Purge":
|
||||
var zb0004 uint32
|
||||
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Purge")
|
||||
return
|
||||
}
|
||||
for zb0004 > 0 {
|
||||
zb0004--
|
||||
field, bts, err = msgp.ReadMapKeyZC(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Purge")
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "RetainVersions":
|
||||
z.Purge.RetainVersions, bts, err = msgp.ReadIntBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Purge", "RetainVersions")
|
||||
return
|
||||
}
|
||||
default:
|
||||
bts, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Purge")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
bts, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
o = bts
|
||||
return
|
||||
}
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *BatchJobExpireFilter) Msgsize() (s int) {
|
||||
s = 1 + 10 + msgp.DurationSize + 14
|
||||
if z.CreatedBefore == nil {
|
||||
s += msgp.NilSize
|
||||
} else {
|
||||
s += msgp.TimeSize
|
||||
}
|
||||
s += 5 + msgp.ArrayHeaderSize
|
||||
for za0001 := range z.Tags {
|
||||
s += z.Tags[za0001].Msgsize()
|
||||
}
|
||||
s += 9 + msgp.ArrayHeaderSize
|
||||
for za0002 := range z.Metadata {
|
||||
s += z.Metadata[za0002].Msgsize()
|
||||
}
|
||||
s += 5 + z.Size.Msgsize() + 5 + msgp.StringPrefixSize + len(z.Type) + 5 + msgp.StringPrefixSize + len(z.Name) + 6 + 1 + 15 + msgp.IntSize
|
||||
return
|
||||
}
|
||||
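// (Editorial aside as a comment:) in Msgsize above, each small integer
// literal is the encoded width of a fixed map key: 10 = len("OlderThan") + 1
// fixstr prefix byte, 14 = len("CreatedBefore") + 1, 5 = len("Tags") + 1, and
// the leading 1 is the single fixmap header byte.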
// DecodeMsg implements msgp.Decodable
func (z *BatchJobExpirePurge) DecodeMsg(dc *msgp.Reader) (err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, err = dc.ReadMapHeader()
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, err = dc.ReadMapKeyPtr()
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "RetainVersions":
			z.RetainVersions, err = dc.ReadInt()
			if err != nil {
				err = msgp.WrapError(err, "RetainVersions")
				return
			}
		default:
			err = dc.Skip()
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	return
}

// EncodeMsg implements msgp.Encodable
func (z BatchJobExpirePurge) EncodeMsg(en *msgp.Writer) (err error) {
	// map header, size 1
	// write "RetainVersions"
	err = en.Append(0x81, 0xae, 0x52, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
	if err != nil {
		return
	}
	err = en.WriteInt(z.RetainVersions)
	if err != nil {
		err = msgp.WrapError(err, "RetainVersions")
		return
	}
	return
}

// MarshalMsg implements msgp.Marshaler
func (z BatchJobExpirePurge) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 1
	// string "RetainVersions"
	o = append(o, 0x81, 0xae, 0x52, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
	o = msgp.AppendInt(o, z.RetainVersions)
	return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *BatchJobExpirePurge) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "RetainVersions":
			z.RetainVersions, bts, err = msgp.ReadIntBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "RetainVersions")
				return
			}
		default:
			bts, err = msgp.Skip(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	o = bts
	return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z BatchJobExpirePurge) Msgsize() (s int) {
	s = 1 + 15 + msgp.IntSize
	return
}
cmd/batch-expire_gen_test.go (new file, 349 lines)
@@ -0,0 +1,349 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
	"bytes"
	"testing"

	"github.com/tinylib/msgp/msgp"
)

func TestMarshalUnmarshalBatchJobExpire(t *testing.T) {
	v := BatchJobExpire{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgBatchJobExpire(b *testing.B) {
	v := BatchJobExpire{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgBatchJobExpire(b *testing.B) {
	v := BatchJobExpire{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalBatchJobExpire(b *testing.B) {
	v := BatchJobExpire{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeBatchJobExpire(t *testing.T) {
	v := BatchJobExpire{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeBatchJobExpire Msgsize() is inaccurate")
	}

	vn := BatchJobExpire{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeBatchJobExpire(b *testing.B) {
	v := BatchJobExpire{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeBatchJobExpire(b *testing.B) {
	v := BatchJobExpire{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshalBatchJobExpireFilter(t *testing.T) {
	v := BatchJobExpireFilter{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgBatchJobExpireFilter(b *testing.B) {
	v := BatchJobExpireFilter{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgBatchJobExpireFilter(b *testing.B) {
	v := BatchJobExpireFilter{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalBatchJobExpireFilter(b *testing.B) {
	v := BatchJobExpireFilter{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeBatchJobExpireFilter(t *testing.T) {
	v := BatchJobExpireFilter{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeBatchJobExpireFilter Msgsize() is inaccurate")
	}

	vn := BatchJobExpireFilter{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeBatchJobExpireFilter(b *testing.B) {
	v := BatchJobExpireFilter{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeBatchJobExpireFilter(b *testing.B) {
	v := BatchJobExpireFilter{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshalBatchJobExpirePurge(t *testing.T) {
	v := BatchJobExpirePurge{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgBatchJobExpirePurge(b *testing.B) {
	v := BatchJobExpirePurge{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgBatchJobExpirePurge(b *testing.B) {
	v := BatchJobExpirePurge{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalBatchJobExpirePurge(b *testing.B) {
	v := BatchJobExpirePurge{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeBatchJobExpirePurge(t *testing.T) {
	v := BatchJobExpirePurge{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeBatchJobExpirePurge Msgsize() is inaccurate")
	}

	vn := BatchJobExpirePurge{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeBatchJobExpirePurge(b *testing.B) {
	v := BatchJobExpirePurge{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeBatchJobExpirePurge(b *testing.B) {
	v := BatchJobExpirePurge{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}
cmd/batch-expire_test.go (new file, 71 lines)
@@ -0,0 +1,71 @@
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"testing"

	"gopkg.in/yaml.v2"
)

func TestParseBatchJobExpire(t *testing.T) {
	expireYaml := `
expire: # Expire objects that match a condition
  apiVersion: v1
  bucket: mybucket # Bucket where this batch job will expire matching objects from
  prefix: myprefix # (Optional) Prefix under which this job will expire objects matching the rules below.
  rules:
    - type: object # regular objects with zero or more older versions
      name: NAME # match object names that satisfy the wildcard expression.
      olderThan: 70h # match objects older than this value
      createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
      tags:
        - key: name
          value: pick* # match objects with tag 'name', all values starting with 'pick'
      metadata:
        - key: content-type
          value: image/* # match objects with 'content-type', all values starting with 'image/'
      size:
        lessThan: "10MiB" # match objects with size less than this value (e.g. 10MiB)
        greaterThan: 1MiB # match objects with size greater than this value (e.g. 1MiB)
      purge:
        # retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
        # retainVersions: 5 # keep the latest 5 versions of the object.

    - type: deleted # objects with delete marker as their latest version
      name: NAME # match object names that satisfy the wildcard expression.
      olderThan: 10h # match objects older than this value (e.g. 7d10h31s)
      createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
      purge:
        # retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
        # retainVersions: 5 # keep the latest 5 versions of the object including delete markers.

  notify:
    endpoint: https://notify.endpoint # notification endpoint to receive job completion status
    token: Bearer xxxxx # optional authentication token for the notification endpoint

  retry:
    attempts: 10 # number of retries for the job before giving up
    delay: 500ms # least amount of delay between each retry
`
	var job BatchJobRequest
	err := yaml.UnmarshalStrict([]byte(expireYaml), &job)
	if err != nil {
		t.Fatal("Failed to parse batch-job-expire yaml", err)
	}
}
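// A minimal sketch, not part of this commit (test name and YAML are
// hypothetical): because the test above uses yaml.UnmarshalStrict, a mistyped
// field such as "olderthan" is rejected instead of being silently ignored.
func TestParseBatchJobExpireRejectsUnknownKeys(t *testing.T) {
	badYaml := `
expire:
  apiVersion: v1
  bucket: mybucket
  rules:
    - type: object
      olderthan: 70h # typo: the struct tag is "olderThan"
`
	var job BatchJobRequest
	if err := yaml.UnmarshalStrict([]byte(badYaml), &job); err == nil {
		t.Fatal("expected strict-mode error for unknown field, got nil")
	}
}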
File diff suppressed because it is too large
@@ -42,12 +42,6 @@ func (z *BatchJobRequest) DecodeMsg(dc *msgp.Reader) (err error) {
			err = msgp.WrapError(err, "Started")
			return
		}
	case "Location":
		z.Location, err = dc.ReadString()
		if err != nil {
			err = msgp.WrapError(err, "Location")
			return
		}
	case "Replicate":
		if dc.IsNil() {
			err = dc.ReadNil()
@@ -84,6 +78,24 @@ func (z *BatchJobRequest) DecodeMsg(dc *msgp.Reader) (err error) {
				return
			}
		}
	case "Expire":
		if dc.IsNil() {
			err = dc.ReadNil()
			if err != nil {
				err = msgp.WrapError(err, "Expire")
				return
			}
			z.Expire = nil
		} else {
			if z.Expire == nil {
				z.Expire = new(BatchJobExpire)
			}
			err = z.Expire.DecodeMsg(dc)
			if err != nil {
				err = msgp.WrapError(err, "Expire")
				return
			}
		}
	default:
		err = dc.Skip()
		if err != nil {
@@ -128,16 +140,6 @@ func (z *BatchJobRequest) EncodeMsg(en *msgp.Writer) (err error) {
		err = msgp.WrapError(err, "Started")
		return
	}
	// write "Location"
	err = en.Append(0xa8, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e)
	if err != nil {
		return
	}
	err = en.WriteString(z.Location)
	if err != nil {
		err = msgp.WrapError(err, "Location")
		return
	}
	// write "Replicate"
	err = en.Append(0xa9, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65)
	if err != nil {
@@ -172,6 +174,23 @@ func (z *BatchJobRequest) EncodeMsg(en *msgp.Writer) (err error) {
			return
		}
	}
	// write "Expire"
	err = en.Append(0xa6, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65)
	if err != nil {
		return
	}
	if z.Expire == nil {
		err = en.WriteNil()
		if err != nil {
			return
		}
	} else {
		err = z.Expire.EncodeMsg(en)
		if err != nil {
			err = msgp.WrapError(err, "Expire")
			return
		}
	}
	return
}

@@ -188,9 +207,6 @@ func (z *BatchJobRequest) MarshalMsg(b []byte) (o []byte, err error) {
	// string "Started"
	o = append(o, 0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
	o = msgp.AppendTime(o, z.Started)
	// string "Location"
	o = append(o, 0xa8, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e)
	o = msgp.AppendString(o, z.Location)
	// string "Replicate"
	o = append(o, 0xa9, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65)
	if z.Replicate == nil {
@@ -213,6 +229,17 @@ func (z *BatchJobRequest) MarshalMsg(b []byte) (o []byte, err error) {
			return
		}
	}
	// string "Expire"
	o = append(o, 0xa6, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65)
	if z.Expire == nil {
		o = msgp.AppendNil(o)
	} else {
		o, err = z.Expire.MarshalMsg(o)
		if err != nil {
			err = msgp.WrapError(err, "Expire")
			return
		}
	}
	return
}

@@ -252,12 +279,6 @@ func (z *BatchJobRequest) UnmarshalMsg(bts []byte) (o []byte, err error) {
			err = msgp.WrapError(err, "Started")
			return
		}
	case "Location":
		z.Location, bts, err = msgp.ReadStringBytes(bts)
		if err != nil {
			err = msgp.WrapError(err, "Location")
			return
		}
	case "Replicate":
		if msgp.IsNil(bts) {
			bts, err = msgp.ReadNilBytes(bts)
@@ -292,6 +313,23 @@ func (z *BatchJobRequest) UnmarshalMsg(bts []byte) (o []byte, err error) {
				return
			}
		}
	case "Expire":
		if msgp.IsNil(bts) {
			bts, err = msgp.ReadNilBytes(bts)
			if err != nil {
				return
			}
			z.Expire = nil
		} else {
			if z.Expire == nil {
				z.Expire = new(BatchJobExpire)
			}
			bts, err = z.Expire.UnmarshalMsg(bts)
			if err != nil {
				err = msgp.WrapError(err, "Expire")
				return
			}
		}
	default:
		bts, err = msgp.Skip(bts)
		if err != nil {
@@ -306,7 +344,7 @@ func (z *BatchJobRequest) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobRequest) Msgsize() (s int) {
	s = 1 + 3 + msgp.StringPrefixSize + len(z.ID) + 5 + msgp.StringPrefixSize + len(z.User) + 8 + msgp.TimeSize + 9 + msgp.StringPrefixSize + len(z.Location) + 10
	s = 1 + 3 + msgp.StringPrefixSize + len(z.ID) + 5 + msgp.StringPrefixSize + len(z.User) + 8 + msgp.TimeSize + 10
	if z.Replicate == nil {
		s += msgp.NilSize
	} else {
@@ -318,6 +356,12 @@ func (z *BatchJobRequest) Msgsize() (s int) {
	} else {
		s += z.KeyRotate.Msgsize()
	}
	s += 7
	if z.Expire == nil {
		s += msgp.NilSize
	} else {
		s += z.Expire.Msgsize()
	}
	return
}

@@ -375,6 +419,12 @@ func (z *batchJobInfo) DecodeMsg(dc *msgp.Reader) (err error) {
			err = msgp.WrapError(err, "RetryAttempts")
			return
		}
	case "at":
		z.Attempts, err = dc.ReadInt()
		if err != nil {
			err = msgp.WrapError(err, "Attempts")
			return
		}
	case "cmp":
		z.Complete, err = dc.ReadBool()
		if err != nil {
@@ -448,9 +498,9 @@ func (z *batchJobInfo) DecodeMsg(dc *msgp.Reader) (err error) {

// EncodeMsg implements msgp.Encodable
func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
	// map header, size 16
	// map header, size 17
	// write "v"
	err = en.Append(0xde, 0x0, 0x10, 0xa1, 0x76)
	err = en.Append(0xde, 0x0, 0x11, 0xa1, 0x76)
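	// (Editorial aside as a comment, grounded in the MessagePack spec:) 0xde
	// is the map16 marker followed by a big-endian uint16 entry count, so the
	// change from 0x10 (16) to 0x11 (17) simply accounts for the new "at"
	// (Attempts) field added in this commit.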
	if err != nil {
		return
	}
@@ -509,6 +559,16 @@ func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
		err = msgp.WrapError(err, "RetryAttempts")
		return
	}
	// write "at"
	err = en.Append(0xa2, 0x61, 0x74)
	if err != nil {
		return
	}
	err = en.WriteInt(z.Attempts)
	if err != nil {
		err = msgp.WrapError(err, "Attempts")
		return
	}
	// write "cmp"
	err = en.Append(0xa3, 0x63, 0x6d, 0x70)
	if err != nil {
@@ -615,9 +675,9 @@ func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
// MarshalMsg implements msgp.Marshaler
func (z *batchJobInfo) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 16
	// map header, size 17
	// string "v"
	o = append(o, 0xde, 0x0, 0x10, 0xa1, 0x76)
	o = append(o, 0xde, 0x0, 0x11, 0xa1, 0x76)
	o = msgp.AppendInt(o, z.Version)
	// string "jid"
	o = append(o, 0xa3, 0x6a, 0x69, 0x64)
@@ -634,6 +694,9 @@ func (z *batchJobInfo) MarshalMsg(b []byte) (o []byte, err error) {
	// string "ra"
	o = append(o, 0xa2, 0x72, 0x61)
	o = msgp.AppendInt(o, z.RetryAttempts)
	// string "at"
	o = append(o, 0xa2, 0x61, 0x74)
	o = msgp.AppendInt(o, z.Attempts)
	// string "cmp"
	o = append(o, 0xa3, 0x63, 0x6d, 0x70)
	o = msgp.AppendBool(o, z.Complete)
@@ -721,6 +784,12 @@ func (z *batchJobInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
			err = msgp.WrapError(err, "RetryAttempts")
			return
		}
	case "at":
		z.Attempts, bts, err = msgp.ReadIntBytes(bts)
		if err != nil {
			err = msgp.WrapError(err, "Attempts")
			return
		}
	case "cmp":
		z.Complete, bts, err = msgp.ReadBoolBytes(bts)
		if err != nil {
@@ -795,6 +864,6 @@ func (z *batchJobInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *batchJobInfo) Msgsize() (s int) {
	s = 3 + 2 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.JobID) + 3 + msgp.StringPrefixSize + len(z.JobType) + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.IntSize + 4 + msgp.BoolSize + 4 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size
	s = 3 + 2 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.JobID) + 3 + msgp.StringPrefixSize + len(z.JobType) + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.IntSize + 3 + msgp.IntSize + 4 + msgp.BoolSize + 4 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size
	return
}

@@ -18,24 +18,66 @@
package cmd

import (
	"fmt"
	"strings"
	"time"

	"github.com/minio/pkg/v2/wildcard"
	"github.com/dustin/go-humanize"
	"github.com/minio/pkg/v3/wildcard"
	"gopkg.in/yaml.v3"
)

//go:generate msgp -file $GOFILE
//msgp:ignore BatchJobYamlErr

// BatchJobYamlErr can be used to return yaml validation errors with line,
// column information guiding user to fix syntax errors
type BatchJobYamlErr struct {
	line, col int
	msg       string
}

// message returns the error message excluding line, col information.
// Intended to be used in unit tests.
func (b BatchJobYamlErr) message() string {
	return b.msg
}

// Error implements Error interface
func (b BatchJobYamlErr) Error() string {
	return fmt.Sprintf("%s\n Hint: error near line: %d, col: %d", b.msg, b.line, b.col)
}

// BatchJobKV is a key-value data type which supports wildcard matching
type BatchJobKV struct {
	Key string `yaml:"key" json:"key"`
	Value string `yaml:"value" json:"value"`
	line, col int
	Key   string `yaml:"key" json:"key"`
	Value string `yaml:"value" json:"value"`
}

var _ yaml.Unmarshaler = &BatchJobKV{}

// UnmarshalYAML - BatchJobKV extends default unmarshal to extract line, col information.
func (kv *BatchJobKV) UnmarshalYAML(val *yaml.Node) error {
	type jobKV BatchJobKV
	var tmp jobKV
	err := val.Decode(&tmp)
	if err != nil {
		return err
	}
	*kv = BatchJobKV(tmp)
	kv.line, kv.col = val.Line, val.Column
	return nil
}

// Validate returns an error if key is empty
func (kv BatchJobKV) Validate() error {
	if kv.Key == "" {
		return errInvalidArgument
		return BatchJobYamlErr{
			line: kv.line,
			col:  kv.col,
			msg:  "key can't be empty",
		}
	}
	return nil
}
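// Illustrative sketch, not part of this commit: because UnmarshalYAML stores
// val.Line and val.Column, a later Validate failure points back at the YAML
// node that caused it (the exact line/col values below are assumptions).
//
//	var kv BatchJobKV
//	_ = yaml.Unmarshal([]byte("key: \"\"\nvalue: pick*"), &kv)
//	if err := kv.Validate(); err != nil {
//		fmt.Println(err) // key can't be empty
//		                 //  Hint: error near line: 1, col: 1
//	}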
@@ -59,25 +101,190 @@ func (kv BatchJobKV) Match(ikv BatchJobKV) bool {
// BatchJobNotification stores notification endpoint and token information.
// Used by batch jobs to notify of their status.
type BatchJobNotification struct {
	Endpoint string `yaml:"endpoint" json:"endpoint"`
	Token string `yaml:"token" json:"token"`
	line, col int
	Endpoint  string `yaml:"endpoint" json:"endpoint"`
	Token     string `yaml:"token" json:"token"`
}

var _ yaml.Unmarshaler = &BatchJobNotification{}

// UnmarshalYAML - BatchJobNotification extends unmarshal to extract line, column information
func (b *BatchJobNotification) UnmarshalYAML(val *yaml.Node) error {
	type notification BatchJobNotification
	var tmp notification
	err := val.Decode(&tmp)
	if err != nil {
		return err
	}

	*b = BatchJobNotification(tmp)
	b.line, b.col = val.Line, val.Column
	return nil
}

// BatchJobRetry stores retry configuration used in the event of failures.
type BatchJobRetry struct {
	Attempts int `yaml:"attempts" json:"attempts"` // number of retry attempts
	Delay time.Duration `yaml:"delay" json:"delay"` // delay between each retry
	line, col int
	Attempts  int           `yaml:"attempts" json:"attempts"` // number of retry attempts
	Delay     time.Duration `yaml:"delay" json:"delay"`       // delay between each retry
}

var _ yaml.Unmarshaler = &BatchJobRetry{}

// UnmarshalYAML - BatchJobRetry extends unmarshal to extract line, column information
func (r *BatchJobRetry) UnmarshalYAML(val *yaml.Node) error {
	type retry BatchJobRetry
	var tmp retry
	err := val.Decode(&tmp)
	if err != nil {
		return err
	}

	*r = BatchJobRetry(tmp)
	r.line, r.col = val.Line, val.Column
	return nil
}

// Validate validates input replicate retries.
func (r BatchJobRetry) Validate() error {
	if r.Attempts < 0 {
		return errInvalidArgument
		return BatchJobYamlErr{
			line: r.line,
			col:  r.col,
			msg:  "Invalid arguments specified",
		}
	}

	if r.Delay < 0 {
		return errInvalidArgument
		return BatchJobYamlErr{
			line: r.line,
			col:  r.col,
			msg:  "Invalid arguments specified",
		}
	}

	return nil
}

// # snowball based archive transfer is by default enabled when source
// # is local and target is remote which is also minio.
// snowball:
//   disable: false # optionally turn off snowball archive transfer
//   batch: 100 # up to this many objects per archive
//   inmemory: true # indicates if the archive must be staged locally or in-memory
//   compress: true # S2/Snappy compressed archive
//   smallerThan: 5MiB # create archive for all objects smaller than 5MiB
//   skipErrs: false # skips any source side read() errors

// BatchJobSnowball describes the snowball feature when replicating objects from a local source to a remote target
type BatchJobSnowball struct {
	line, col   int
	Disable     *bool   `yaml:"disable" json:"disable"`
	Batch       *int    `yaml:"batch" json:"batch"`
	InMemory    *bool   `yaml:"inmemory" json:"inmemory"`
	Compress    *bool   `yaml:"compress" json:"compress"`
	SmallerThan *string `yaml:"smallerThan" json:"smallerThan"`
	SkipErrs    *bool   `yaml:"skipErrs" json:"skipErrs"`
}

var _ yaml.Unmarshaler = &BatchJobSnowball{}

// UnmarshalYAML - BatchJobSnowball extends unmarshal to extract line, column information
func (b *BatchJobSnowball) UnmarshalYAML(val *yaml.Node) error {
	type snowball BatchJobSnowball
	var tmp snowball
	err := val.Decode(&tmp)
	if err != nil {
		return err
	}

	*b = BatchJobSnowball(tmp)
	b.line, b.col = val.Line, val.Column
	return nil
}

// Validate the snowball parameters in the job description
func (b BatchJobSnowball) Validate() error {
	if *b.Batch <= 0 {
		return BatchJobYamlErr{
			line: b.line,
			col:  b.col,
			msg:  "batch number should be a non-zero positive value",
		}
	}
	_, err := humanize.ParseBytes(*b.SmallerThan)
	if err != nil {
		return BatchJobYamlErr{
			line: b.line,
			col:  b.col,
			msg:  err.Error(),
		}
	}
	return nil
}

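// Note (a hedged reading of the code above, not a statement of intent):
// Validate dereferences b.Batch and b.SmallerThan without nil checks, so it
// appears to assume defaults are filled in before it is called; running it on
// a zero-value BatchJobSnowball would panic.
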
// BatchJobSizeFilter supports size based filters - LesserThan and GreaterThan
type BatchJobSizeFilter struct {
	line, col  int
	UpperBound BatchJobSize `yaml:"lessThan" json:"lessThan"`
	LowerBound BatchJobSize `yaml:"greaterThan" json:"greaterThan"`
}

// UnmarshalYAML - BatchJobSizeFilter extends unmarshal to extract line, column information
func (sf *BatchJobSizeFilter) UnmarshalYAML(val *yaml.Node) error {
	type sizeFilter BatchJobSizeFilter
	var tmp sizeFilter
	err := val.Decode(&tmp)
	if err != nil {
		return err
	}

	*sf = BatchJobSizeFilter(tmp)
	sf.line, sf.col = val.Line, val.Column
	return nil
}

// InRange returns true in the following cases and false otherwise,
// - sf.LowerBound < sz, when sf.LowerBound alone is specified
// - sz < sf.UpperBound, when sf.UpperBound alone is specified
// - sf.LowerBound < sz < sf.UpperBound when both are specified
func (sf BatchJobSizeFilter) InRange(sz int64) bool {
	if sf.UpperBound > 0 && sz > int64(sf.UpperBound) {
		return false
	}

	if sf.LowerBound > 0 && sz < int64(sf.LowerBound) {
		return false
	}
	return true
}
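// Illustrative sketch, not part of this commit: a filter corresponding to
// lessThan: 10MiB and greaterThan: 1MiB admits sizes inside the window and
// rejects those outside it.
//
//	sf := BatchJobSizeFilter{
//		UpperBound: 10 << 20, // lessThan: 10MiB
//		LowerBound: 1 << 20,  // greaterThan: 1MiB
//	}
//	sf.InRange(5 << 20)   // true
//	sf.InRange(512 << 10) // false: below the lower bound
//	sf.InRange(20 << 20)  // false: above the upper bound
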
// Validate checks if sf is a valid batch-job size filter
func (sf BatchJobSizeFilter) Validate() error {
	if sf.LowerBound > 0 && sf.UpperBound > 0 && sf.LowerBound >= sf.UpperBound {
		return BatchJobYamlErr{
			line: sf.line,
			col:  sf.col,
			msg:  "invalid batch-job size filter",
		}
	}
	return nil
}

// BatchJobSize supports humanized byte values in yaml files
type BatchJobSize uint64
type BatchJobSize int64

// UnmarshalYAML to parse humanized byte values
func (s *BatchJobSize) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var batchExpireSz string
	err := unmarshal(&batchExpireSz)
	if err != nil {
		return err
	}
	sz, err := humanize.ParseBytes(batchExpireSz)
	if err != nil {
		return err
	}
	*s = BatchJobSize(sz)
	return nil
}
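// Illustrative sketch, not part of this commit: humanize.ParseBytes accepts
// both IEC and SI suffixes, which is what allows values like "10MiB" or "1MB"
// in the size filter YAML above.
//
//	sz, err := humanize.ParseBytes("10MiB") // sz == 10485760, err == nil
//	_ = BatchJobSize(sz)
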
@@ -389,3 +389,666 @@ func (z BatchJobRetry) Msgsize() (s int) {
	s = 1 + 9 + msgp.IntSize + 6 + msgp.DurationSize
	return
}

// DecodeMsg implements msgp.Decodable
func (z *BatchJobSize) DecodeMsg(dc *msgp.Reader) (err error) {
	{
		var zb0001 int64
		zb0001, err = dc.ReadInt64()
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		(*z) = BatchJobSize(zb0001)
	}
	return
}

// EncodeMsg implements msgp.Encodable
func (z BatchJobSize) EncodeMsg(en *msgp.Writer) (err error) {
	err = en.WriteInt64(int64(z))
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	return
}

// MarshalMsg implements msgp.Marshaler
func (z BatchJobSize) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	o = msgp.AppendInt64(o, int64(z))
	return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *BatchJobSize) UnmarshalMsg(bts []byte) (o []byte, err error) {
	{
		var zb0001 int64
		zb0001, bts, err = msgp.ReadInt64Bytes(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		(*z) = BatchJobSize(zb0001)
	}
	o = bts
	return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z BatchJobSize) Msgsize() (s int) {
	s = msgp.Int64Size
	return
}

// DecodeMsg implements msgp.Decodable
func (z *BatchJobSizeFilter) DecodeMsg(dc *msgp.Reader) (err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, err = dc.ReadMapHeader()
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, err = dc.ReadMapKeyPtr()
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "UpperBound":
			{
				var zb0002 int64
				zb0002, err = dc.ReadInt64()
				if err != nil {
					err = msgp.WrapError(err, "UpperBound")
					return
				}
				z.UpperBound = BatchJobSize(zb0002)
			}
		case "LowerBound":
			{
				var zb0003 int64
				zb0003, err = dc.ReadInt64()
				if err != nil {
					err = msgp.WrapError(err, "LowerBound")
					return
				}
				z.LowerBound = BatchJobSize(zb0003)
			}
		default:
			err = dc.Skip()
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	return
}

// EncodeMsg implements msgp.Encodable
func (z BatchJobSizeFilter) EncodeMsg(en *msgp.Writer) (err error) {
	// map header, size 2
	// write "UpperBound"
	err = en.Append(0x82, 0xaa, 0x55, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64)
	if err != nil {
		return
	}
	err = en.WriteInt64(int64(z.UpperBound))
	if err != nil {
		err = msgp.WrapError(err, "UpperBound")
		return
	}
	// write "LowerBound"
	err = en.Append(0xaa, 0x4c, 0x6f, 0x77, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64)
	if err != nil {
		return
	}
	err = en.WriteInt64(int64(z.LowerBound))
	if err != nil {
		err = msgp.WrapError(err, "LowerBound")
		return
	}
	return
}

// MarshalMsg implements msgp.Marshaler
func (z BatchJobSizeFilter) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 2
	// string "UpperBound"
	o = append(o, 0x82, 0xaa, 0x55, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64)
	o = msgp.AppendInt64(o, int64(z.UpperBound))
	// string "LowerBound"
	o = append(o, 0xaa, 0x4c, 0x6f, 0x77, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64)
	o = msgp.AppendInt64(o, int64(z.LowerBound))
	return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *BatchJobSizeFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "UpperBound":
			{
				var zb0002 int64
				zb0002, bts, err = msgp.ReadInt64Bytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "UpperBound")
					return
				}
				z.UpperBound = BatchJobSize(zb0002)
			}
		case "LowerBound":
			{
				var zb0003 int64
				zb0003, bts, err = msgp.ReadInt64Bytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "LowerBound")
					return
				}
				z.LowerBound = BatchJobSize(zb0003)
			}
		default:
			bts, err = msgp.Skip(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	o = bts
	return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z BatchJobSizeFilter) Msgsize() (s int) {
	s = 1 + 11 + msgp.Int64Size + 11 + msgp.Int64Size
	return
}

// DecodeMsg implements msgp.Decodable
func (z *BatchJobSnowball) DecodeMsg(dc *msgp.Reader) (err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, err = dc.ReadMapHeader()
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, err = dc.ReadMapKeyPtr()
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "Disable":
			if dc.IsNil() {
				err = dc.ReadNil()
				if err != nil {
					err = msgp.WrapError(err, "Disable")
					return
				}
				z.Disable = nil
			} else {
				if z.Disable == nil {
					z.Disable = new(bool)
				}
				*z.Disable, err = dc.ReadBool()
				if err != nil {
					err = msgp.WrapError(err, "Disable")
					return
				}
			}
		case "Batch":
			if dc.IsNil() {
				err = dc.ReadNil()
				if err != nil {
					err = msgp.WrapError(err, "Batch")
					return
				}
				z.Batch = nil
			} else {
				if z.Batch == nil {
					z.Batch = new(int)
				}
				*z.Batch, err = dc.ReadInt()
				if err != nil {
					err = msgp.WrapError(err, "Batch")
					return
				}
			}
		case "InMemory":
			if dc.IsNil() {
				err = dc.ReadNil()
				if err != nil {
					err = msgp.WrapError(err, "InMemory")
					return
				}
				z.InMemory = nil
			} else {
				if z.InMemory == nil {
					z.InMemory = new(bool)
				}
				*z.InMemory, err = dc.ReadBool()
				if err != nil {
					err = msgp.WrapError(err, "InMemory")
					return
				}
			}
		case "Compress":
			if dc.IsNil() {
				err = dc.ReadNil()
				if err != nil {
					err = msgp.WrapError(err, "Compress")
					return
				}
				z.Compress = nil
			} else {
				if z.Compress == nil {
					z.Compress = new(bool)
				}
				*z.Compress, err = dc.ReadBool()
				if err != nil {
					err = msgp.WrapError(err, "Compress")
					return
				}
			}
		case "SmallerThan":
			if dc.IsNil() {
				err = dc.ReadNil()
				if err != nil {
					err = msgp.WrapError(err, "SmallerThan")
					return
				}
				z.SmallerThan = nil
			} else {
				if z.SmallerThan == nil {
					z.SmallerThan = new(string)
				}
				*z.SmallerThan, err = dc.ReadString()
				if err != nil {
					err = msgp.WrapError(err, "SmallerThan")
					return
				}
			}
		case "SkipErrs":
			if dc.IsNil() {
				err = dc.ReadNil()
				if err != nil {
					err = msgp.WrapError(err, "SkipErrs")
					return
				}
				z.SkipErrs = nil
			} else {
				if z.SkipErrs == nil {
					z.SkipErrs = new(bool)
				}
				*z.SkipErrs, err = dc.ReadBool()
				if err != nil {
					err = msgp.WrapError(err, "SkipErrs")
					return
				}
			}
		default:
			err = dc.Skip()
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	return
}

// EncodeMsg implements msgp.Encodable
func (z *BatchJobSnowball) EncodeMsg(en *msgp.Writer) (err error) {
	// map header, size 6
	// write "Disable"
	err = en.Append(0x86, 0xa7, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65)
	if err != nil {
		return
	}
	if z.Disable == nil {
		err = en.WriteNil()
		if err != nil {
			return
		}
	} else {
		err = en.WriteBool(*z.Disable)
		if err != nil {
			err = msgp.WrapError(err, "Disable")
			return
		}
	}
	// write "Batch"
	err = en.Append(0xa5, 0x42, 0x61, 0x74, 0x63, 0x68)
	if err != nil {
		return
	}
	if z.Batch == nil {
		err = en.WriteNil()
		if err != nil {
			return
		}
	} else {
		err = en.WriteInt(*z.Batch)
		if err != nil {
			err = msgp.WrapError(err, "Batch")
			return
		}
	}
	// write "InMemory"
	err = en.Append(0xa8, 0x49, 0x6e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79)
	if err != nil {
		return
	}
	if z.InMemory == nil {
		err = en.WriteNil()
		if err != nil {
			return
		}
	} else {
		err = en.WriteBool(*z.InMemory)
		if err != nil {
			err = msgp.WrapError(err, "InMemory")
			return
		}
	}
	// write "Compress"
	err = en.Append(0xa8, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73)
	if err != nil {
		return
	}
	if z.Compress == nil {
		err = en.WriteNil()
		if err != nil {
			return
		}
	} else {
		err = en.WriteBool(*z.Compress)
		if err != nil {
			err = msgp.WrapError(err, "Compress")
			return
		}
	}
	// write "SmallerThan"
	err = en.Append(0xab, 0x53, 0x6d, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
	if err != nil {
		return
	}
	if z.SmallerThan == nil {
		err = en.WriteNil()
		if err != nil {
			return
		}
	} else {
		err = en.WriteString(*z.SmallerThan)
		if err != nil {
			err = msgp.WrapError(err, "SmallerThan")
			return
		}
	}
	// write "SkipErrs"
	err = en.Append(0xa8, 0x53, 0x6b, 0x69, 0x70, 0x45, 0x72, 0x72, 0x73)
	if err != nil {
		return
	}
	if z.SkipErrs == nil {
		err = en.WriteNil()
		if err != nil {
			return
		}
	} else {
		err = en.WriteBool(*z.SkipErrs)
		if err != nil {
			err = msgp.WrapError(err, "SkipErrs")
			return
		}
	}
	return
}

// MarshalMsg implements msgp.Marshaler
func (z *BatchJobSnowball) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 6
	// string "Disable"
	o = append(o, 0x86, 0xa7, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65)
	if z.Disable == nil {
		o = msgp.AppendNil(o)
	} else {
		o = msgp.AppendBool(o, *z.Disable)
	}
	// string "Batch"
	o = append(o, 0xa5, 0x42, 0x61, 0x74, 0x63, 0x68)
	if z.Batch == nil {
		o = msgp.AppendNil(o)
	} else {
		o = msgp.AppendInt(o, *z.Batch)
	}
	// string "InMemory"
	o = append(o, 0xa8, 0x49, 0x6e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79)
	if z.InMemory == nil {
		o = msgp.AppendNil(o)
	} else {
		o = msgp.AppendBool(o, *z.InMemory)
	}
	// string "Compress"
	o = append(o, 0xa8, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73)
	if z.Compress == nil {
		o = msgp.AppendNil(o)
	} else {
		o = msgp.AppendBool(o, *z.Compress)
	}
	// string "SmallerThan"
	o = append(o, 0xab, 0x53, 0x6d, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
	if z.SmallerThan == nil {
		o = msgp.AppendNil(o)
	} else {
		o = msgp.AppendString(o, *z.SmallerThan)
	}
	// string "SkipErrs"
	o = append(o, 0xa8, 0x53, 0x6b, 0x69, 0x70, 0x45, 0x72, 0x72, 0x73)
	if z.SkipErrs == nil {
		o = msgp.AppendNil(o)
	} else {
		o = msgp.AppendBool(o, *z.SkipErrs)
	}
	return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *BatchJobSnowball) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "Disable":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				if err != nil {
					return
				}
				z.Disable = nil
			} else {
				if z.Disable == nil {
					z.Disable = new(bool)
				}
				*z.Disable, bts, err = msgp.ReadBoolBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "Disable")
					return
				}
			}
		case "Batch":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				if err != nil {
					return
				}
				z.Batch = nil
			} else {
				if z.Batch == nil {
					z.Batch = new(int)
				}
				*z.Batch, bts, err = msgp.ReadIntBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "Batch")
					return
				}
			}
		case "InMemory":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				if err != nil {
					return
				}
				z.InMemory = nil
			} else {
				if z.InMemory == nil {
					z.InMemory = new(bool)
				}
				*z.InMemory, bts, err = msgp.ReadBoolBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "InMemory")
					return
				}
			}
		case "Compress":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				if err != nil {
					return
				}
				z.Compress = nil
			} else {
				if z.Compress == nil {
					z.Compress = new(bool)
				}
				*z.Compress, bts, err = msgp.ReadBoolBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "Compress")
					return
				}
			}
		case "SmallerThan":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				if err != nil {
					return
				}
				z.SmallerThan = nil
			} else {
				if z.SmallerThan == nil {
					z.SmallerThan = new(string)
				}
				*z.SmallerThan, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "SmallerThan")
					return
				}
			}
		case "SkipErrs":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				if err != nil {
					return
				}
				z.SkipErrs = nil
			} else {
				if z.SkipErrs == nil {
					z.SkipErrs = new(bool)
				}
				*z.SkipErrs, bts, err = msgp.ReadBoolBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "SkipErrs")
					return
				}
			}
		default:
			bts, err = msgp.Skip(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	o = bts
	return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobSnowball) Msgsize() (s int) {
	s = 1 + 8
	if z.Disable == nil {
		s += msgp.NilSize
	} else {
		s += msgp.BoolSize
	}
	s += 6
	if z.Batch == nil {
		s += msgp.NilSize
	} else {
		s += msgp.IntSize
	}
	s += 9
	if z.InMemory == nil {
		s += msgp.NilSize
	} else {
		s += msgp.BoolSize
	}
	s += 9
	if z.Compress == nil {
		s += msgp.NilSize
	} else {
		s += msgp.BoolSize
	}
	s += 12
	if z.SmallerThan == nil {
		s += msgp.NilSize
	} else {
		s += msgp.StringPrefixSize + len(*z.SmallerThan)
	}
	s += 9
	if z.SkipErrs == nil {
		s += msgp.NilSize
	} else {
		s += msgp.BoolSize
	}
	return
}

@@ -347,3 +347,229 @@ func BenchmarkDecodeBatchJobRetry(b *testing.B) {
		}
	}
}

func TestMarshalUnmarshalBatchJobSizeFilter(t *testing.T) {
	v := BatchJobSizeFilter{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgBatchJobSizeFilter(b *testing.B) {
	v := BatchJobSizeFilter{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgBatchJobSizeFilter(b *testing.B) {
	v := BatchJobSizeFilter{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalBatchJobSizeFilter(b *testing.B) {
	v := BatchJobSizeFilter{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeBatchJobSizeFilter(t *testing.T) {
	v := BatchJobSizeFilter{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeBatchJobSizeFilter Msgsize() is inaccurate")
	}

	vn := BatchJobSizeFilter{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeBatchJobSizeFilter(b *testing.B) {
	v := BatchJobSizeFilter{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeBatchJobSizeFilter(b *testing.B) {
	v := BatchJobSizeFilter{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshalBatchJobSnowball(t *testing.T) {
	v := BatchJobSnowball{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgBatchJobSnowball(b *testing.B) {
	v := BatchJobSnowball{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgBatchJobSnowball(b *testing.B) {
	v := BatchJobSnowball{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalBatchJobSnowball(b *testing.B) {
	v := BatchJobSnowball{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeBatchJobSnowball(t *testing.T) {
	v := BatchJobSnowball{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
|
||||
m := v.Msgsize()
|
||||
if buf.Len() > m {
|
||||
t.Log("WARNING: TestEncodeDecodeBatchJobSnowball Msgsize() is inaccurate")
|
||||
}
|
||||
|
||||
vn := BatchJobSnowball{}
|
||||
err := msgp.Decode(&buf, &vn)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
msgp.Encode(&buf, &v)
|
||||
err = msgp.NewReader(&buf).Skip()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEncodeBatchJobSnowball(b *testing.B) {
|
||||
v := BatchJobSnowball{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
en := msgp.NewWriter(msgp.Nowhere)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.EncodeMsg(en)
|
||||
}
|
||||
en.Flush()
|
||||
}
|
||||
|
||||
func BenchmarkDecodeBatchJobSnowball(b *testing.B) {
|
||||
v := BatchJobSnowball{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
rd := msgp.NewEndlessReader(buf.Bytes(), b)
|
||||
dc := msgp.NewReader(rd)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := v.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
148
cmd/batch-job-common-types_test.go
Normal file
148
cmd/batch-job-common-types_test.go
Normal file
@@ -0,0 +1,148 @@
|
||||
// Copyright (c) 2015-2023 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBatchJobSizeInRange(t *testing.T) {
|
||||
tests := []struct {
|
||||
objSize int64
|
||||
sizeFilter BatchJobSizeFilter
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
// 1Mib < 2Mib < 10MiB -> in range
|
||||
objSize: 2 << 20,
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 10 << 20,
|
||||
LowerBound: 1 << 20,
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
// 2KiB < 1 MiB -> out of range from left
|
||||
objSize: 2 << 10,
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 10 << 20,
|
||||
LowerBound: 1 << 20,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
// 11MiB > 10 MiB -> out of range from right
|
||||
objSize: 11 << 20,
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 10 << 20,
|
||||
LowerBound: 1 << 20,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
// 2MiB < 10MiB -> in range
|
||||
objSize: 2 << 20,
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 10 << 20,
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
// 2MiB > 1MiB -> in range
|
||||
objSize: 2 << 20,
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
LowerBound: 1 << 20,
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Run(fmt.Sprintf("test-%d", i+1), func(t *testing.T) {
|
||||
if got := test.sizeFilter.InRange(test.objSize); got != test.want {
|
||||
t.Fatalf("Expected %v but got %v", test.want, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchJobSizeValidate(t *testing.T) {
|
||||
errInvalidBatchJobSizeFilter := BatchJobYamlErr{
|
||||
msg: "invalid batch-job size filter",
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
sizeFilter BatchJobSizeFilter
|
||||
err error
|
||||
}{
|
||||
{
|
||||
// Unspecified size filter is a valid filter
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 0,
|
||||
LowerBound: 0,
|
||||
},
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 0,
|
||||
LowerBound: 1 << 20,
|
||||
},
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 10 << 20,
|
||||
LowerBound: 0,
|
||||
},
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
// LowerBound > UpperBound -> empty range
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 1 << 20,
|
||||
LowerBound: 10 << 20,
|
||||
},
|
||||
err: errInvalidBatchJobSizeFilter,
|
||||
},
|
||||
{
|
||||
// LowerBound == UpperBound -> empty range
|
||||
sizeFilter: BatchJobSizeFilter{
|
||||
UpperBound: 1 << 20,
|
||||
LowerBound: 1 << 20,
|
||||
},
|
||||
err: errInvalidBatchJobSizeFilter,
|
||||
},
|
||||
}
|
||||
for i, test := range tests {
|
||||
t.Run(fmt.Sprintf("test-%d", i+1), func(t *testing.T) {
|
||||
err := test.sizeFilter.Validate()
|
||||
if err != nil {
|
||||
gotErr := err.(BatchJobYamlErr)
|
||||
testErr := test.err.(BatchJobYamlErr)
|
||||
if gotErr.message() != testErr.message() {
|
||||
t.Fatalf("Expected %v but got %v", test.err, err)
|
||||
}
|
||||
}
|
||||
if err == nil && test.err != nil {
|
||||
t.Fatalf("Expected %v but got nil", test.err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -155,6 +155,7 @@ type BatchJobReplicateSource struct {
|
||||
Endpoint string `yaml:"endpoint" json:"endpoint"`
|
||||
Path string `yaml:"path" json:"path"`
|
||||
Creds BatchJobReplicateCredentials `yaml:"credentials" json:"credentials"`
|
||||
Snowball BatchJobSnowball `yaml:"snowball" json:"snowball"`
|
||||
}
|
||||
|
||||
// ValidPath returns true if path is valid
|
||||
|
||||
@@ -469,6 +469,12 @@ func (z *BatchJobReplicateSource) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
case "Snowball":
|
||||
err = z.Snowball.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Snowball")
|
||||
return
|
||||
}
|
||||
default:
|
||||
err = dc.Skip()
|
||||
if err != nil {
|
||||
@@ -482,9 +488,9 @@ func (z *BatchJobReplicateSource) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *BatchJobReplicateSource) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 6
|
||||
// map header, size 7
|
||||
// write "Type"
|
||||
err = en.Append(0x86, 0xa4, 0x54, 0x79, 0x70, 0x65)
|
||||
err = en.Append(0x87, 0xa4, 0x54, 0x79, 0x70, 0x65)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -569,15 +575,25 @@ func (z *BatchJobReplicateSource) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
err = msgp.WrapError(err, "Creds", "SessionToken")
|
||||
return
|
||||
}
|
||||
// write "Snowball"
|
||||
err = en.Append(0xa8, 0x53, 0x6e, 0x6f, 0x77, 0x62, 0x61, 0x6c, 0x6c)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = z.Snowball.EncodeMsg(en)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Snowball")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z *BatchJobReplicateSource) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// map header, size 6
|
||||
// map header, size 7
|
||||
// string "Type"
|
||||
o = append(o, 0x86, 0xa4, 0x54, 0x79, 0x70, 0x65)
|
||||
o = append(o, 0x87, 0xa4, 0x54, 0x79, 0x70, 0x65)
|
||||
o = msgp.AppendString(o, string(z.Type))
|
||||
// string "Bucket"
|
||||
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
|
||||
@@ -603,6 +619,13 @@ func (z *BatchJobReplicateSource) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
// string "SessionToken"
|
||||
o = append(o, 0xac, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e)
|
||||
o = msgp.AppendString(o, z.Creds.SessionToken)
|
||||
// string "Snowball"
|
||||
o = append(o, 0xa8, 0x53, 0x6e, 0x6f, 0x77, 0x62, 0x61, 0x6c, 0x6c)
|
||||
o, err = z.Snowball.MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Snowball")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -699,6 +722,12 @@ func (z *BatchJobReplicateSource) UnmarshalMsg(bts []byte) (o []byte, err error)
|
||||
}
|
||||
}
|
||||
}
|
||||
case "Snowball":
|
||||
bts, err = z.Snowball.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Snowball")
|
||||
return
|
||||
}
|
||||
default:
|
||||
bts, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
@@ -713,7 +742,7 @@ func (z *BatchJobReplicateSource) UnmarshalMsg(bts []byte) (o []byte, err error)
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *BatchJobReplicateSource) Msgsize() (s int) {
|
||||
s = 1 + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 5 + msgp.StringPrefixSize + len(z.Path) + 6 + 1 + 10 + msgp.StringPrefixSize + len(z.Creds.AccessKey) + 10 + msgp.StringPrefixSize + len(z.Creds.SecretKey) + 13 + msgp.StringPrefixSize + len(z.Creds.SessionToken)
|
||||
s = 1 + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 5 + msgp.StringPrefixSize + len(z.Path) + 6 + 1 + 10 + msgp.StringPrefixSize + len(z.Creds.AccessKey) + 10 + msgp.StringPrefixSize + len(z.Creds.SecretKey) + 13 + msgp.StringPrefixSize + len(z.Creds.SessionToken) + 9 + z.Snowball.Msgsize()
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -33,9 +33,8 @@ import (
|
||||
"github.com/minio/minio/internal/crypto"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/kms"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/v2/env"
|
||||
"github.com/minio/pkg/v2/workers"
|
||||
"github.com/minio/pkg/v3/env"
|
||||
"github.com/minio/pkg/v3/workers"
|
||||
)
|
||||
|
||||
// keyrotate:
|
||||
@@ -96,6 +95,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
|
||||
if e.Type == ssekms && spaces {
|
||||
return crypto.ErrInvalidEncryptionKeyID
|
||||
}
|
||||
|
||||
if e.Type == ssekms && GlobalKMS != nil {
|
||||
ctx := kms.Context{}
|
||||
if e.Context != "" {
|
||||
@@ -114,7 +114,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
|
||||
e.kmsContext[k] = v
|
||||
}
|
||||
ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation
|
||||
if _, err := GlobalKMS.GenerateKey(GlobalContext, e.Key, ctx); err != nil {
|
||||
if _, err := GlobalKMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{Name: e.Key, AssociatedData: ctx}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -154,7 +154,6 @@ type BatchJobKeyRotateV1 struct {
|
||||
Flags BatchJobKeyRotateFlags `yaml:"flags" json:"flags"`
|
||||
Bucket string `yaml:"bucket" json:"bucket"`
|
||||
Prefix string `yaml:"prefix" json:"prefix"`
|
||||
Endpoint string `yaml:"endpoint" json:"endpoint"`
|
||||
Encryption BatchJobKeyRotateEncryption `yaml:"encryption" json:"encryption"`
|
||||
}
|
||||
|
||||
@@ -261,6 +260,9 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
||||
if err := ri.load(ctx, api, job); err != nil {
|
||||
return err
|
||||
}
|
||||
if ri.Complete {
|
||||
return nil
|
||||
}
|
||||
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
lastObject := ri.Object
|
||||
@@ -272,7 +274,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
||||
|
||||
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
skip := func(info FileInfo) (ok bool) {
|
||||
selectObj := func(info FileInfo) (ok bool) {
|
||||
if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan {
|
||||
// skip all objects that are newer than specified older duration
|
||||
return false
|
||||
@@ -355,18 +357,23 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
||||
retryAttempts := ri.RetryAttempts
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
results := make(chan ObjectInfo, 100)
|
||||
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, ObjectOptions{
|
||||
WalkMarker: lastObject,
|
||||
WalkFilter: skip,
|
||||
results := make(chan itemOrErr[ObjectInfo], 100)
|
||||
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
|
||||
Marker: lastObject,
|
||||
Filter: selectObj,
|
||||
}); err != nil {
|
||||
cancel()
|
||||
// Do not need to retry if we can't list objects on source.
|
||||
return err
|
||||
}
|
||||
|
||||
for result := range results {
|
||||
result := result
|
||||
failed := false
|
||||
for res := range results {
|
||||
if res.Err != nil {
|
||||
failed = true
|
||||
batchLogIf(ctx, res.Err)
|
||||
break
|
||||
}
|
||||
result := res.Item
|
||||
sseKMS := crypto.S3KMS.IsEncrypted(result.UserDefined)
|
||||
sseS3 := crypto.S3.IsEncrypted(result.UserDefined)
|
||||
if !sseKMS && !sseS3 { // neither sse-s3 nor sse-kms disallowed
|
||||
@@ -376,21 +383,19 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
||||
go func() {
|
||||
defer wk.Give()
|
||||
for attempts := 1; attempts <= retryAttempts; attempts++ {
|
||||
attempts := attempts
|
||||
stopFn := globalBatchJobsMetrics.trace(batchKeyRotationMetricObject, job.ID, attempts, result)
|
||||
stopFn := globalBatchJobsMetrics.trace(batchJobMetricKeyRotation, job.ID, attempts)
|
||||
success := true
|
||||
if err := r.KeyRotate(ctx, api, result); err != nil {
|
||||
stopFn(err)
|
||||
logger.LogIf(ctx, err)
|
||||
stopFn(result, err)
|
||||
batchLogIf(ctx, err)
|
||||
success = false
|
||||
} else {
|
||||
stopFn(nil)
|
||||
stopFn(result, nil)
|
||||
}
|
||||
ri.trackCurrentBucketObject(r.Bucket, result, success)
|
||||
ri.RetryAttempts = attempts
|
||||
ri.trackCurrentBucketObject(r.Bucket, result, success, attempts)
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
// persist in-memory state to disk after every 10secs.
|
||||
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
|
||||
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
|
||||
if success {
|
||||
break
|
||||
}
|
||||
@@ -398,18 +403,22 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
||||
time.Sleep(delay + time.Duration(rnd.Float64()*float64(delay)))
|
||||
}
|
||||
}
|
||||
|
||||
if wait := globalBatchConfig.KeyRotationWait(); wait > 0 {
|
||||
time.Sleep(wait)
|
||||
}
|
||||
}()
|
||||
}
|
||||
wk.Wait()
|
||||
|
||||
ri.Complete = ri.ObjectsFailed == 0
|
||||
ri.Failed = ri.ObjectsFailed > 0
|
||||
ri.Complete = !failed && ri.ObjectsFailed == 0
|
||||
ri.Failed = failed || ri.ObjectsFailed > 0
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
// persist in-memory state to disk.
|
||||
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
|
||||
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
|
||||
|
||||
if err := r.Notify(ctx, ri); err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err))
|
||||
batchLogIf(ctx, fmt.Errorf("unable to notify %v", err))
|
||||
}
|
||||
|
||||
cancel()
|
||||
@@ -470,8 +479,5 @@ func (r *BatchJobKeyRotateV1) Validate(ctx context.Context, job BatchJobRequest,
|
||||
}
|
||||
}
|
||||
|
||||
if err := r.Flags.Retry.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return r.Flags.Retry.Validate()
|
||||
}
|
||||
|
||||
@@ -409,12 +409,6 @@ func (z *BatchJobKeyRotateV1) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
err = msgp.WrapError(err, "Prefix")
|
||||
return
|
||||
}
|
||||
case "Endpoint":
|
||||
z.Endpoint, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Endpoint")
|
||||
return
|
||||
}
|
||||
case "Encryption":
|
||||
err = z.Encryption.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
@@ -434,9 +428,9 @@ func (z *BatchJobKeyRotateV1) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *BatchJobKeyRotateV1) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 6
|
||||
// map header, size 5
|
||||
// write "APIVersion"
|
||||
err = en.Append(0x86, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
|
||||
err = en.Append(0x85, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -501,16 +495,6 @@ func (z *BatchJobKeyRotateV1) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
err = msgp.WrapError(err, "Prefix")
|
||||
return
|
||||
}
|
||||
// write "Endpoint"
|
||||
err = en.Append(0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.Endpoint)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Endpoint")
|
||||
return
|
||||
}
|
||||
// write "Encryption"
|
||||
err = en.Append(0xaa, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e)
|
||||
if err != nil {
|
||||
@@ -527,9 +511,9 @@ func (z *BatchJobKeyRotateV1) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z *BatchJobKeyRotateV1) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// map header, size 6
|
||||
// map header, size 5
|
||||
// string "APIVersion"
|
||||
o = append(o, 0x86, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
|
||||
o = append(o, 0x85, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
|
||||
o = msgp.AppendString(o, z.APIVersion)
|
||||
// string "Flags"
|
||||
o = append(o, 0xa5, 0x46, 0x6c, 0x61, 0x67, 0x73)
|
||||
@@ -561,9 +545,6 @@ func (z *BatchJobKeyRotateV1) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
// string "Prefix"
|
||||
o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
|
||||
o = msgp.AppendString(o, z.Prefix)
|
||||
// string "Endpoint"
|
||||
o = append(o, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
|
||||
o = msgp.AppendString(o, z.Endpoint)
|
||||
// string "Encryption"
|
||||
o = append(o, 0xaa, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e)
|
||||
o, err = z.Encryption.MarshalMsg(o)
|
||||
@@ -651,12 +632,6 @@ func (z *BatchJobKeyRotateV1) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
err = msgp.WrapError(err, "Prefix")
|
||||
return
|
||||
}
|
||||
case "Endpoint":
|
||||
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Endpoint")
|
||||
return
|
||||
}
|
||||
case "Encryption":
|
||||
bts, err = z.Encryption.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
@@ -677,7 +652,7 @@ func (z *BatchJobKeyRotateV1) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *BatchJobKeyRotateV1) Msgsize() (s int) {
|
||||
s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 6 + 1 + 7 + z.Flags.Filter.Msgsize() + 7 + z.Flags.Notify.Msgsize() + 6 + z.Flags.Retry.Msgsize() + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 11 + z.Encryption.Msgsize()
|
||||
s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 6 + 1 + 7 + z.Flags.Filter.Msgsize() + 7 + z.Flags.Notify.Msgsize() + 6 + z.Flags.Retry.Msgsize() + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 11 + z.Encryption.Msgsize()
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -8,13 +8,14 @@ func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[batchReplicationMetricObject-0]
|
||||
_ = x[batchKeyRotationMetricObject-1]
|
||||
_ = x[batchJobMetricReplication-0]
|
||||
_ = x[batchJobMetricKeyRotation-1]
|
||||
_ = x[batchJobMetricExpire-2]
|
||||
}
|
||||
|
||||
const _batchJobMetric_name = "batchReplicationMetricObjectbatchKeyRotationMetricObject"
|
||||
const _batchJobMetric_name = "ReplicationKeyRotationExpire"
|
||||
|
||||
var _batchJobMetric_index = [...]uint8{0, 28, 56}
|
||||
var _batchJobMetric_index = [...]uint8{0, 11, 22, 28}
|
||||
|
||||
func (i batchJobMetric) String() string {
|
||||
if i >= batchJobMetric(len(_batchJobMetric_index)-1) {
|
||||
|
||||
@@ -20,25 +20,23 @@ package cmd
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/ioutil"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/minio/internal/ringbuffer"
|
||||
)
|
||||
|
||||
// Calculates bitrot in chunks and writes the hash into the stream.
|
||||
type streamingBitrotWriter struct {
|
||||
iow io.WriteCloser
|
||||
closeWithErr func(err error) error
|
||||
closeWithErr func(err error)
|
||||
h hash.Hash
|
||||
shardSize int64
|
||||
canClose *sync.WaitGroup
|
||||
byteBuf []byte
|
||||
}
|
||||
|
||||
func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
|
||||
@@ -66,7 +64,10 @@ func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
|
||||
}
|
||||
|
||||
func (b *streamingBitrotWriter) Close() error {
|
||||
// Close the underlying writer.
|
||||
// This will also flush the ring buffer if used.
|
||||
err := b.iow.Close()
|
||||
|
||||
// Wait for all data to be written before returning else it causes race conditions.
|
||||
// Race condition is because of io.PipeWriter implementation. i.e consider the following
|
||||
// sequent of operations:
|
||||
@@ -77,29 +78,34 @@ func (b *streamingBitrotWriter) Close() error {
|
||||
if b.canClose != nil {
|
||||
b.canClose.Wait()
|
||||
}
|
||||
|
||||
// Recycle the buffer.
|
||||
if b.byteBuf != nil {
|
||||
globalBytePoolCap.Load().Put(b.byteBuf)
|
||||
b.byteBuf = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// newStreamingBitrotWriterBuffer returns streaming bitrot writer implementation.
|
||||
// The output is written to the supplied writer w.
|
||||
func newStreamingBitrotWriterBuffer(w io.Writer, algo BitrotAlgorithm, shardSize int64) io.Writer {
|
||||
return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil, closeWithErr: func(err error) error {
|
||||
// Similar to CloseWithError on pipes we always return nil.
|
||||
return nil
|
||||
}}
|
||||
return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil, closeWithErr: func(err error) {}}
|
||||
}
|
||||
|
||||
// Returns streaming bitrot writer implementation.
|
||||
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
|
||||
r, w := io.Pipe()
|
||||
func newStreamingBitrotWriter(disk StorageAPI, origvolume, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
|
||||
h := algo.New()
|
||||
buf := globalBytePoolCap.Load().Get()
|
||||
rb := ringbuffer.NewBuffer(buf[:cap(buf)]).SetBlocking(true)
|
||||
|
||||
bw := &streamingBitrotWriter{
|
||||
iow: ioutil.NewDeadlineWriter(w, diskMaxTimeout),
|
||||
closeWithErr: w.CloseWithError,
|
||||
iow: ioutil.NewDeadlineWriter(rb.WriteCloser(), globalDriveConfig.GetMaxTimeout()),
|
||||
closeWithErr: rb.CloseWithError,
|
||||
h: h,
|
||||
shardSize: shardSize,
|
||||
canClose: &sync.WaitGroup{},
|
||||
byteBuf: buf,
|
||||
}
|
||||
bw.canClose.Add(1)
|
||||
go func() {
|
||||
@@ -110,7 +116,7 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
|
||||
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
|
||||
totalFileSize = bitrotSumsTotalSize + length
|
||||
}
|
||||
r.CloseWithError(disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r))
|
||||
rb.CloseWithError(disk.CreateFile(context.TODO(), origvolume, volume, filePath, totalFileSize, rb))
|
||||
}()
|
||||
return bw
|
||||
}
|
||||
@@ -134,7 +140,7 @@ func (b *streamingBitrotReader) Close() error {
|
||||
return nil
|
||||
}
|
||||
if closer, ok := b.rc.(io.Closer); ok {
|
||||
// drain the body for connection re-use at network layer.
|
||||
// drain the body for connection reuse at network layer.
|
||||
xhttp.DrainBody(struct {
|
||||
io.Reader
|
||||
io.Closer
|
||||
@@ -154,29 +160,12 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
|
||||
// Can never happen unless there are programmer bugs
|
||||
return 0, errUnexpected
|
||||
}
|
||||
ignoredErrs := []error{
|
||||
errDiskNotFound,
|
||||
}
|
||||
if strings.HasPrefix(b.volume, minioMetaBucket) {
|
||||
ignoredErrs = append(ignoredErrs,
|
||||
errFileNotFound,
|
||||
errVolumeNotFound,
|
||||
errFileVersionNotFound,
|
||||
)
|
||||
}
|
||||
if b.rc == nil {
|
||||
// For the first ReadAt() call we need to open the stream for reading.
|
||||
b.currOffset = offset
|
||||
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
|
||||
if len(b.data) == 0 && b.tillOffset != streamOffset {
|
||||
b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
|
||||
if err != nil {
|
||||
if !IsErr(err, ignoredErrs...) {
|
||||
logger.LogOnceIf(GlobalContext,
|
||||
fmt.Errorf("Reading erasure shards at (%s: %s/%s) returned '%w', will attempt to reconstruct if we have quorum",
|
||||
b.disk, b.volume, b.filePath, err), "bitrot-read-file-stream-"+b.volume+"-"+b.filePath)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
b.rc = io.NewSectionReader(bytes.NewReader(b.data), streamOffset, b.tillOffset-streamOffset)
|
||||
}
|
||||
@@ -198,10 +187,7 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
|
||||
return 0, err
|
||||
}
|
||||
b.h.Write(buf)
|
||||
|
||||
if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
|
||||
logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s -> %s/%s - content hash does not match - expected %s, got %s",
|
||||
b.disk, b.volume, b.filePath, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))))
|
||||
return 0, errFileCorrupt
|
||||
}
|
||||
b.currOffset += int64(len(buf))
|
||||
|
||||
@@ -19,11 +19,8 @@ package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
|
||||
"github.com/minio/minio/internal/logger"
|
||||
)
|
||||
|
||||
// Implementation to calculate bitrot for the whole file.
|
||||
@@ -38,12 +35,10 @@ type wholeBitrotWriter struct {
|
||||
func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
|
||||
err := b.disk.AppendFile(context.TODO(), b.volume, b.filePath, p)
|
||||
if err != nil {
|
||||
logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s returned %w", b.disk, err))
|
||||
return 0, err
|
||||
}
|
||||
_, err = b.Hash.Write(p)
|
||||
if err != nil {
|
||||
logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s returned %w", b.disk, err))
|
||||
return 0, err
|
||||
}
|
||||
return len(p), nil
|
||||
@@ -72,12 +67,10 @@ func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error)
|
||||
if b.buf == nil {
|
||||
b.buf = make([]byte, b.tillOffset-offset)
|
||||
if _, err := b.disk.ReadFile(context.TODO(), b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
|
||||
logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, err))
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
if len(b.buf) < len(buf) {
|
||||
logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, errLessData))
|
||||
return 0, errLessData
|
||||
}
|
||||
n = copy(buf, b.buf)
|
||||
|
||||
@@ -102,9 +102,9 @@ func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
|
||||
return
|
||||
}
|
||||
|
||||
func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
|
||||
func newBitrotWriter(disk StorageAPI, origvolume, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
|
||||
if algo == HighwayHash256S {
|
||||
return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize)
|
||||
return newStreamingBitrotWriter(disk, origvolume, volume, filePath, length, algo, shardSize)
|
||||
}
|
||||
return newWholeBitrotWriter(disk, volume, filePath, algo, shardSize)
|
||||
}
|
||||
|
||||
@@ -36,7 +36,7 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
|
||||
|
||||
disk.MakeVol(context.Background(), volume)
|
||||
|
||||
writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
|
||||
writer := newBitrotWriter(disk, "", volume, filePath, 35, bitrotAlgo, 10)
|
||||
|
||||
_, err = writer.Write([]byte("aaaaaaaaaa"))
|
||||
if err != nil {
|
||||
|
||||
@@ -19,86 +19,90 @@ package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"math/rand"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/set"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/grid"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/minio/internal/rest"
|
||||
"github.com/minio/mux"
|
||||
"github.com/minio/pkg/v2/env"
|
||||
)
|
||||
|
||||
const (
|
||||
bootstrapRESTVersion = "v1"
|
||||
bootstrapRESTVersionPrefix = SlashSeparator + bootstrapRESTVersion
|
||||
bootstrapRESTPrefix = minioReservedBucketPath + "/bootstrap"
|
||||
bootstrapRESTPath = bootstrapRESTPrefix + bootstrapRESTVersionPrefix
|
||||
)
|
||||
|
||||
const (
|
||||
bootstrapRESTMethodHealth = "/health"
|
||||
bootstrapRESTMethodVerify = "/verify"
|
||||
"github.com/minio/pkg/v3/env"
|
||||
)
|
||||
|
||||
// To abstract a node over network.
|
||||
type bootstrapRESTServer struct{}
|
||||
|
||||
//go:generate msgp -file=$GOFILE
|
||||
|
||||
// ServerSystemConfig - captures information about server configuration.
|
||||
type ServerSystemConfig struct {
|
||||
MinioEndpoints EndpointServerPools
|
||||
MinioEnv map[string]string
|
||||
NEndpoints int
|
||||
CmdLines []string
|
||||
MinioEnv map[string]string
|
||||
Checksum string
|
||||
}
|
||||
|
||||
// Diff - returns error on first difference found in two configs.
|
||||
func (s1 ServerSystemConfig) Diff(s2 ServerSystemConfig) error {
|
||||
if s1.MinioEndpoints.NEndpoints() != s2.MinioEndpoints.NEndpoints() {
|
||||
return fmt.Errorf("Expected number of endpoints %d, seen %d", s1.MinioEndpoints.NEndpoints(),
|
||||
s2.MinioEndpoints.NEndpoints())
|
||||
func (s1 *ServerSystemConfig) Diff(s2 *ServerSystemConfig) error {
|
||||
if s1.Checksum != s2.Checksum {
|
||||
return fmt.Errorf("Expected MinIO binary checksum: %s, seen: %s", s1.Checksum, s2.Checksum)
|
||||
}
|
||||
|
||||
for i, ep := range s1.MinioEndpoints {
|
||||
if ep.CmdLine != s2.MinioEndpoints[i].CmdLine {
|
||||
return fmt.Errorf("Expected command line argument %s, seen %s", ep.CmdLine,
|
||||
s2.MinioEndpoints[i].CmdLine)
|
||||
}
|
||||
if ep.SetCount != s2.MinioEndpoints[i].SetCount {
|
||||
return fmt.Errorf("Expected set count %d, seen %d", ep.SetCount,
|
||||
s2.MinioEndpoints[i].SetCount)
|
||||
}
|
||||
if ep.DrivesPerSet != s2.MinioEndpoints[i].DrivesPerSet {
|
||||
return fmt.Errorf("Expected drives pet set %d, seen %d", ep.DrivesPerSet,
|
||||
s2.MinioEndpoints[i].DrivesPerSet)
|
||||
}
|
||||
if ep.Platform != s2.MinioEndpoints[i].Platform {
|
||||
return fmt.Errorf("Expected platform '%s', found to be on '%s'",
|
||||
ep.Platform, s2.MinioEndpoints[i].Platform)
|
||||
ns1 := s1.NEndpoints
|
||||
ns2 := s2.NEndpoints
|
||||
if ns1 != ns2 {
|
||||
return fmt.Errorf("Expected number of endpoints %d, seen %d", ns1, ns2)
|
||||
}
|
||||
|
||||
for i, cmdLine := range s1.CmdLines {
|
||||
if cmdLine != s2.CmdLines[i] {
|
||||
return fmt.Errorf("Expected command line argument %s, seen %s", cmdLine,
|
||||
s2.CmdLines[i])
|
||||
}
|
||||
}
|
||||
if !reflect.DeepEqual(s1.MinioEnv, s2.MinioEnv) {
|
||||
var missing []string
|
||||
var mismatching []string
|
||||
for k, v := range s1.MinioEnv {
|
||||
ev, ok := s2.MinioEnv[k]
|
||||
if !ok {
|
||||
missing = append(missing, k)
|
||||
} else if v != ev {
|
||||
mismatching = append(mismatching, k)
|
||||
}
|
||||
}
|
||||
if len(mismatching) > 0 {
|
||||
return fmt.Errorf(`Expected same MINIO_ environment variables and values across all servers: Missing environment values: %s / Mismatch environment values: %s`, missing, mismatching)
|
||||
}
|
||||
return fmt.Errorf(`Expected same MINIO_ environment variables and values across all servers: Missing environment values: %s`, missing)
|
||||
|
||||
if reflect.DeepEqual(s1.MinioEnv, s2.MinioEnv) {
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
|
||||
// Report differences in environment variables.
|
||||
var missing []string
|
||||
var mismatching []string
|
||||
for k, v := range s1.MinioEnv {
|
||||
ev, ok := s2.MinioEnv[k]
|
||||
if !ok {
|
||||
missing = append(missing, k)
|
||||
} else if v != ev {
|
||||
mismatching = append(mismatching, k)
|
||||
}
|
||||
}
|
||||
var extra []string
|
||||
for k := range s2.MinioEnv {
|
||||
_, ok := s1.MinioEnv[k]
|
||||
if !ok {
|
||||
extra = append(extra, k)
|
||||
}
|
||||
}
|
||||
msg := "Expected MINIO_* environment name and values across all servers to be same: "
|
||||
if len(missing) > 0 {
|
||||
msg += fmt.Sprintf(`Missing environment values: %v. `, missing)
|
||||
}
|
||||
if len(mismatching) > 0 {
|
||||
msg += fmt.Sprintf(`Mismatching environment values: %v. `, mismatching)
|
||||
}
|
||||
if len(extra) > 0 {
|
||||
msg += fmt.Sprintf(`Extra environment values: %v. `, extra)
|
||||
}
|
||||
|
||||
return errors.New(strings.TrimSpace(msg))
|
||||
}
|
||||
|
||||
var skipEnvs = map[string]struct{}{
|
||||
@@ -112,7 +116,7 @@ var skipEnvs = map[string]struct{}{
|
||||
"MINIO_SECRET_KEY": {},
|
||||
}
|
||||
|
||||
func getServerSystemCfg() ServerSystemConfig {
|
||||
func getServerSystemCfg() *ServerSystemConfig {
|
||||
envs := env.List("MINIO_")
|
||||
envValues := make(map[string]string, len(envs))
|
||||
for _, envK := range envs {
|
||||
@@ -125,131 +129,127 @@ func getServerSystemCfg() ServerSystemConfig {
|
||||
}
|
||||
envValues[envK] = logger.HashString(env.Get(envK, ""))
|
||||
}
|
||||
return ServerSystemConfig{
|
||||
MinioEndpoints: globalEndpoints,
|
||||
MinioEnv: envValues,
|
||||
scfg := &ServerSystemConfig{NEndpoints: globalEndpoints.NEndpoints(), MinioEnv: envValues, Checksum: binaryChecksum}
|
||||
var cmdLines []string
|
||||
for _, ep := range globalEndpoints {
|
||||
cmdLines = append(cmdLines, ep.CmdLine)
|
||||
}
|
||||
scfg.CmdLines = cmdLines
|
||||
return scfg
|
||||
}
|
||||
|
||||
func (b *bootstrapRESTServer) writeErrorResponse(w http.ResponseWriter, err error) {
|
||||
w.WriteHeader(http.StatusForbidden)
|
||||
w.Write([]byte(err.Error()))
|
||||
func (s *bootstrapRESTServer) VerifyHandler(params *grid.MSS) (*ServerSystemConfig, *grid.RemoteErr) {
|
||||
return getServerSystemCfg(), nil
|
||||
}
|
||||
|
||||
// HealthHandler returns success if request is valid
|
||||
func (b *bootstrapRESTServer) HealthHandler(w http.ResponseWriter, r *http.Request) {}
|
||||
|
||||
func (b *bootstrapRESTServer) VerifyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "VerifyHandler")
|
||||
|
||||
if err := storageServerRequestValidate(r); err != nil {
|
||||
b.writeErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
cfg := getServerSystemCfg()
|
||||
logger.LogIf(ctx, json.NewEncoder(w).Encode(&cfg))
|
||||
}
|
||||
var serverVerifyHandler = grid.NewSingleHandler[*grid.MSS, *ServerSystemConfig](grid.HandlerServerVerify, grid.NewMSS, func() *ServerSystemConfig { return &ServerSystemConfig{} })
|
||||
|
||||
// registerBootstrapRESTHandlers - register bootstrap rest router.
|
||||
func registerBootstrapRESTHandlers(router *mux.Router) {
|
||||
h := func(f http.HandlerFunc) http.HandlerFunc {
|
||||
return collectInternodeStats(httpTraceHdrs(f))
|
||||
}
|
||||
|
||||
func registerBootstrapRESTHandlers(gm *grid.Manager) {
|
||||
server := &bootstrapRESTServer{}
|
||||
subrouter := router.PathPrefix(bootstrapRESTPrefix).Subrouter()
|
||||
|
||||
subrouter.Methods(http.MethodPost).Path(bootstrapRESTVersionPrefix + bootstrapRESTMethodHealth).HandlerFunc(
|
||||
h(server.HealthHandler))
|
||||
|
||||
subrouter.Methods(http.MethodPost).Path(bootstrapRESTVersionPrefix + bootstrapRESTMethodVerify).HandlerFunc(
|
||||
h(server.VerifyHandler))
|
||||
logger.FatalIf(serverVerifyHandler.Register(gm, server.VerifyHandler), "unable to register handler")
|
||||
}
|
||||
|
||||
// client to talk to bootstrap NEndpoints.
|
||||
type bootstrapRESTClient struct {
|
||||
endpoint Endpoint
|
||||
restClient *rest.Client
|
||||
gridConn *grid.Connection
|
||||
}
|
||||
|
||||
// Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected
|
||||
// permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()
|
||||
// after verifying format.json
|
||||
func (client *bootstrapRESTClient) callWithContext(ctx context.Context, method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
|
||||
if values == nil {
|
||||
values = make(url.Values)
|
||||
// Verify function verifies the server config.
|
||||
func (client *bootstrapRESTClient) Verify(ctx context.Context, srcCfg *ServerSystemConfig) (err error) {
|
||||
if newObjectLayerFn() != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
respBody, err = client.restClient.Call(ctx, method, values, body, length)
|
||||
if err == nil {
|
||||
return respBody, nil
|
||||
recvCfg, err := serverVerifyHandler.Call(ctx, client.gridConn, grid.NewMSS())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// We do not need the response after returning.
|
||||
defer serverVerifyHandler.PutResponse(recvCfg)
|
||||
|
||||
return nil, err
|
||||
return srcCfg.Diff(recvCfg)
|
||||
}
|
||||
|
||||
// Stringer provides a canonicalized representation of node.
|
||||
func (client *bootstrapRESTClient) String() string {
|
||||
return client.endpoint.String()
|
||||
return client.gridConn.String()
|
||||
}
|
||||
|
||||
// Verify - fetches system server config.
|
||||
func (client *bootstrapRESTClient) Verify(ctx context.Context, srcCfg ServerSystemConfig) (err error) {
|
||||
if newObjectLayerFn() != nil {
|
||||
return nil
|
||||
var binaryChecksum = getBinaryChecksum()
|
||||
|
||||
func getBinaryChecksum() string {
|
||||
mw := md5.New()
|
||||
b, err := os.Open(os.Args[0])
|
||||
if err == nil {
|
||||
defer b.Close()
|
||||
io.Copy(mw, b)
|
||||
}
|
||||
respBody, err := client.callWithContext(ctx, bootstrapRESTMethodVerify, nil, nil, -1)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer xhttp.DrainBody(respBody)
|
||||
recvCfg := ServerSystemConfig{}
|
||||
if err = json.NewDecoder(respBody).Decode(&recvCfg); err != nil {
|
||||
return err
|
||||
}
|
||||
return srcCfg.Diff(recvCfg)
|
||||
return hex.EncodeToString(mw.Sum(nil))
|
||||
}
|
||||
|
||||
func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointServerPools) error {
|
||||
func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointServerPools, gm *grid.Manager) error {
|
||||
srcCfg := getServerSystemCfg()
|
||||
clnts := newBootstrapRESTClients(endpointServerPools)
|
||||
clnts := newBootstrapRESTClients(endpointServerPools, gm)
|
||||
var onlineServers int
|
||||
var offlineEndpoints []error
|
||||
var incorrectConfigs []error
|
||||
var retries int
|
||||
var mu sync.Mutex
|
||||
for onlineServers < len(clnts)/2 {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(clnts))
|
||||
onlineServers = 0
|
||||
for _, clnt := range clnts {
|
||||
if err := clnt.Verify(ctx, srcCfg); err != nil {
|
||||
bootstrapTraceMsg(fmt.Sprintf("clnt.Verify: %v, endpoint: %v", err, clnt.endpoint))
|
||||
if !isNetworkError(err) {
|
||||
logger.LogOnceIf(ctx, fmt.Errorf("%s has incorrect configuration: %w", clnt.String(), err), clnt.String())
|
||||
incorrectConfigs = append(incorrectConfigs, fmt.Errorf("%s has incorrect configuration: %w", clnt.String(), err))
|
||||
} else {
|
||||
offlineEndpoints = append(offlineEndpoints, fmt.Errorf("%s is unreachable: %w", clnt.String(), err))
|
||||
go func(clnt *bootstrapRESTClient) {
|
||||
defer wg.Done()
|
||||
|
||||
if clnt.gridConn.State() != grid.StateConnected {
|
||||
mu.Lock()
|
||||
offlineEndpoints = append(offlineEndpoints, fmt.Errorf("%s is unreachable: %w", clnt, grid.ErrDisconnected))
|
||||
mu.Unlock()
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
onlineServers++
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
err := clnt.Verify(ctx, srcCfg)
|
||||
mu.Lock()
|
||||
if err != nil {
|
||||
bootstrapTraceMsg(fmt.Sprintf("bootstrapVerify: %v, endpoint: %s", err, clnt))
|
||||
if !isNetworkError(err) {
|
||||
bootLogOnceIf(context.Background(), fmt.Errorf("%s has incorrect configuration: %w", clnt, err), "incorrect_"+clnt.String())
|
||||
incorrectConfigs = append(incorrectConfigs, fmt.Errorf("%s has incorrect configuration: %w", clnt, err))
|
||||
} else {
|
||||
offlineEndpoints = append(offlineEndpoints, fmt.Errorf("%s is unreachable: %w", clnt, err))
|
||||
}
|
||||
} else {
|
||||
onlineServers++
|
||||
}
|
||||
mu.Unlock()
|
||||
}(clnt)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
// Sleep for a while - so that we don't go into
|
||||
// 100% CPU when half the endpoints are offline.
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
// Sleep and stagger to avoid blocked CPU and thundering
|
||||
// herd upon start up sequence.
|
||||
time.Sleep(25*time.Millisecond + time.Duration(rand.Int63n(int64(100*time.Millisecond))))
|
||||
retries++
|
||||
// after 20 retries start logging that servers are not reachable yet
|
||||
if retries >= 20 {
|
||||
logger.Info(fmt.Sprintf("Waiting for atleast %d remote servers with valid configuration to be online", len(clnts)/2))
|
||||
logger.Info(fmt.Sprintf("Waiting for at least %d remote servers with valid configuration to be online", len(clnts)/2))
|
||||
if len(offlineEndpoints) > 0 {
|
||||
logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
|
||||
}
|
||||
if len(incorrectConfigs) > 0 {
|
||||
logger.Info(fmt.Sprintf("Following servers have mismatching configuration %s", incorrectConfigs))
|
||||
}
|
||||
retries = 0 // reset to log again after 5 retries.
|
||||
retries = 0 // reset to log again after 20 retries.
|
||||
}
|
||||
offlineEndpoints = nil
|
||||
incorrectConfigs = nil
|
||||
@@ -258,39 +258,20 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS
|
||||
return nil
|
||||
}
|
||||
|
||||
func newBootstrapRESTClients(endpointServerPools EndpointServerPools) []*bootstrapRESTClient {
|
||||
seenHosts := set.NewStringSet()
|
||||
func newBootstrapRESTClients(endpointServerPools EndpointServerPools, gm *grid.Manager) []*bootstrapRESTClient {
|
||||
seenClient := set.NewStringSet()
|
||||
var clnts []*bootstrapRESTClient
|
||||
for _, ep := range endpointServerPools {
|
||||
for _, endpoint := range ep.Endpoints {
|
||||
if seenHosts.Contains(endpoint.Host) {
|
||||
if endpoint.IsLocal {
|
||||
continue
|
||||
}
|
||||
seenHosts.Add(endpoint.Host)
|
||||
|
||||
// Only proceed for remote endpoints.
|
||||
if !endpoint.IsLocal {
|
||||
cl := newBootstrapRESTClient(endpoint)
|
||||
if serverDebugLog {
|
||||
cl.restClient.TraceOutput = os.Stdout
|
||||
}
|
||||
clnts = append(clnts, cl)
|
||||
if seenClient.Contains(endpoint.Host) {
|
||||
continue
|
||||
}
|
||||
seenClient.Add(endpoint.Host)
|
||||
clnts = append(clnts, &bootstrapRESTClient{gm.Connection(endpoint.GridHost())})
|
||||
}
|
||||
}
|
||||
return clnts
|
||||
}
|
||||
|
||||
// Returns a new bootstrap client.
|
||||
func newBootstrapRESTClient(endpoint Endpoint) *bootstrapRESTClient {
|
||||
serverURL := &url.URL{
|
||||
Scheme: endpoint.Scheme,
|
||||
Host: endpoint.Host,
|
||||
Path: bootstrapRESTPath,
|
||||
}
|
||||
|
||||
restClient := rest.NewClient(serverURL, globalInternodeTransport, newCachedAuthToken())
|
||||
restClient.HealthCheckFn = nil
|
||||
|
||||
return &bootstrapRESTClient{endpoint: endpoint, restClient: restClient}
|
||||
}
|
||||
|
||||
296
cmd/bootstrap-peer-server_gen.go
Normal file
296
cmd/bootstrap-peer-server_gen.go
Normal file
@@ -0,0 +1,296 @@
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
import (
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
)
|
||||
|
||||
// DecodeMsg implements msgp.Decodable
|
||||
func (z *ServerSystemConfig) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, err = dc.ReadMapHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, err = dc.ReadMapKeyPtr()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "NEndpoints":
|
||||
z.NEndpoints, err = dc.ReadInt()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "NEndpoints")
|
||||
return
|
||||
}
|
||||
case "CmdLines":
|
||||
var zb0002 uint32
|
||||
zb0002, err = dc.ReadArrayHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "CmdLines")
|
||||
return
|
||||
}
|
||||
if cap(z.CmdLines) >= int(zb0002) {
|
||||
z.CmdLines = (z.CmdLines)[:zb0002]
|
||||
} else {
|
||||
z.CmdLines = make([]string, zb0002)
|
||||
}
|
||||
for za0001 := range z.CmdLines {
|
||||
z.CmdLines[za0001], err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "CmdLines", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
case "MinioEnv":
|
||||
var zb0003 uint32
|
||||
zb0003, err = dc.ReadMapHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "MinioEnv")
|
||||
return
|
||||
}
|
||||
if z.MinioEnv == nil {
|
||||
z.MinioEnv = make(map[string]string, zb0003)
|
||||
} else if len(z.MinioEnv) > 0 {
|
||||
for key := range z.MinioEnv {
|
||||
delete(z.MinioEnv, key)
|
||||
}
|
||||
}
|
||||
for zb0003 > 0 {
|
||||
zb0003--
|
||||
var za0002 string
|
||||
var za0003 string
|
||||
za0002, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "MinioEnv")
|
||||
return
|
||||
}
|
||||
za0003, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "MinioEnv", za0002)
|
||||
return
|
||||
}
|
||||
z.MinioEnv[za0002] = za0003
|
||||
}
|
||||
case "Checksum":
|
||||
z.Checksum, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksum")
|
||||
return
|
||||
}
|
||||
default:
|
||||
err = dc.Skip()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *ServerSystemConfig) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 4
|
||||
// write "NEndpoints"
|
||||
err = en.Append(0x84, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteInt(z.NEndpoints)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "NEndpoints")
|
||||
return
|
||||
}
|
||||
// write "CmdLines"
|
||||
err = en.Append(0xa8, 0x43, 0x6d, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x73)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteArrayHeader(uint32(len(z.CmdLines)))
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "CmdLines")
|
||||
return
|
||||
}
|
||||
for za0001 := range z.CmdLines {
|
||||
err = en.WriteString(z.CmdLines[za0001])
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "CmdLines", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
// write "MinioEnv"
|
||||
err = en.Append(0xa8, 0x4d, 0x69, 0x6e, 0x69, 0x6f, 0x45, 0x6e, 0x76)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteMapHeader(uint32(len(z.MinioEnv)))
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "MinioEnv")
|
||||
return
|
||||
}
|
||||
for za0002, za0003 := range z.MinioEnv {
|
||||
err = en.WriteString(za0002)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "MinioEnv")
|
||||
return
|
||||
}
|
||||
err = en.WriteString(za0003)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "MinioEnv", za0002)
|
||||
return
|
||||
}
|
||||
}
|
||||
// write "Checksum"
|
||||
err = en.Append(0xa8, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.Checksum)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksum")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z *ServerSystemConfig) MarshalMsg(b []byte) (o []byte, err error) {
|
	o = msgp.Require(b, z.Msgsize())
	// map header, size 4
	// string "NEndpoints"
	o = append(o, 0x84, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73)
	o = msgp.AppendInt(o, z.NEndpoints)
	// string "CmdLines"
	o = append(o, 0xa8, 0x43, 0x6d, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x73)
	o = msgp.AppendArrayHeader(o, uint32(len(z.CmdLines)))
	for za0001 := range z.CmdLines {
		o = msgp.AppendString(o, z.CmdLines[za0001])
	}
	// string "MinioEnv"
	o = append(o, 0xa8, 0x4d, 0x69, 0x6e, 0x69, 0x6f, 0x45, 0x6e, 0x76)
	o = msgp.AppendMapHeader(o, uint32(len(z.MinioEnv)))
	for za0002, za0003 := range z.MinioEnv {
		o = msgp.AppendString(o, za0002)
		o = msgp.AppendString(o, za0003)
	}
	// string "Checksum"
	o = append(o, 0xa8, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d)
	o = msgp.AppendString(o, z.Checksum)
	return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *ServerSystemConfig) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "NEndpoints":
			z.NEndpoints, bts, err = msgp.ReadIntBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "NEndpoints")
				return
			}
		case "CmdLines":
			var zb0002 uint32
			zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "CmdLines")
				return
			}
			if cap(z.CmdLines) >= int(zb0002) {
				z.CmdLines = (z.CmdLines)[:zb0002]
			} else {
				z.CmdLines = make([]string, zb0002)
			}
			for za0001 := range z.CmdLines {
				z.CmdLines[za0001], bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "CmdLines", za0001)
					return
				}
			}
		case "MinioEnv":
			var zb0003 uint32
			zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "MinioEnv")
				return
			}
			if z.MinioEnv == nil {
				z.MinioEnv = make(map[string]string, zb0003)
			} else if len(z.MinioEnv) > 0 {
				for key := range z.MinioEnv {
					delete(z.MinioEnv, key)
				}
			}
			for zb0003 > 0 {
				var za0002 string
				var za0003 string
				zb0003--
				za0002, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "MinioEnv")
					return
				}
				za0003, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "MinioEnv", za0002)
					return
				}
				z.MinioEnv[za0002] = za0003
			}
		case "Checksum":
			z.Checksum, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Checksum")
				return
			}
		default:
			bts, err = msgp.Skip(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	o = bts
	return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ServerSystemConfig) Msgsize() (s int) {
	s = 1 + 11 + msgp.IntSize + 9 + msgp.ArrayHeaderSize
	for za0001 := range z.CmdLines {
		s += msgp.StringPrefixSize + len(z.CmdLines[za0001])
	}
	s += 9 + msgp.MapHeaderSize
	if z.MinioEnv != nil {
		for za0002, za0003 := range z.MinioEnv {
			_ = za0003
			s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003)
		}
	}
	s += 9 + msgp.StringPrefixSize + len(z.Checksum)
	return
}
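For orientation, a minimal sketch of how the generated methods above round-trip a value; the helper name and field values are illustrative assumptions, not part of this change:

package cmd

import "fmt"

// roundTripServerSystemConfig demonstrates the buffer-based path: MarshalMsg
// appends the MessagePack encoding to a presized byte slice, and
// UnmarshalMsg decodes it back into a fresh value.
func roundTripServerSystemConfig() error {
	src := ServerSystemConfig{
		NEndpoints: 4, // illustrative values only
		CmdLines:   []string{"minio server /data{1...4}"},
		MinioEnv:   map[string]string{"MINIO_ROOT_USER": "minioadmin"},
		Checksum:   "example",
	}
	buf, err := src.MarshalMsg(nil) // allocates via msgp.Require(b, src.Msgsize())
	if err != nil {
		return err
	}
	var dst ServerSystemConfig
	if _, err := dst.UnmarshalMsg(buf); err != nil {
		return err
	}
	fmt.Printf("decoded %d endpoints, checksum %q\n", dst.NEndpoints, dst.Checksum)
	return nil
}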
123
cmd/bootstrap-peer-server_gen_test.go
Normal file
@@ -0,0 +1,123 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
	"bytes"
	"testing"

	"github.com/tinylib/msgp/msgp"
)

func TestMarshalUnmarshalServerSystemConfig(t *testing.T) {
	v := ServerSystemConfig{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgServerSystemConfig(b *testing.B) {
	v := ServerSystemConfig{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgServerSystemConfig(b *testing.B) {
	v := ServerSystemConfig{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalServerSystemConfig(b *testing.B) {
	v := ServerSystemConfig{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeServerSystemConfig(t *testing.T) {
	v := ServerSystemConfig{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeServerSystemConfig Msgsize() is inaccurate")
	}

	vn := ServerSystemConfig{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeServerSystemConfig(b *testing.B) {
	v := ServerSystemConfig{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeServerSystemConfig(b *testing.B) {
	v := ServerSystemConfig{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}
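The tests above exercise two codec paths: buffer-based (MarshalMsg/UnmarshalMsg) and streaming (Encode/Decode). A minimal sketch of the streaming path, mirroring TestEncodeDecodeServerSystemConfig; the helper itself is hypothetical:

package cmd

import (
	"bytes"

	"github.com/tinylib/msgp/msgp"
)

// streamServerSystemConfig encodes v to an in-memory stream via the
// generated EncodeMsg (through msgp.Encode) and decodes it back via the
// generated DecodeMsg (through msgp.Decode).
func streamServerSystemConfig(v *ServerSystemConfig) (*ServerSystemConfig, error) {
	var buf bytes.Buffer
	if err := msgp.Encode(&buf, v); err != nil {
		return nil, err
	}
	out := new(ServerSystemConfig)
	if err := msgp.Decode(&buf, out); err != nil {
		return nil, err
	}
	return out, nil
}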
@@ -25,12 +25,12 @@ import (
 	"io"
 	"net/http"
 
-	"github.com/minio/kes-go"
+	"github.com/minio/kms-go/kes"
 	"github.com/minio/madmin-go/v3"
 	"github.com/minio/minio/internal/kms"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/mux"
-	"github.com/minio/pkg/v2/policy"
+	"github.com/minio/pkg/v3/policy"
 )
 
 const (
@@ -85,7 +85,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
 	kmsKey := encConfig.KeyID()
 	if kmsKey != "" {
 		kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
-		_, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
+		_, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{Name: kmsKey, AssociatedData: kmsContext})
 		if err != nil {
 			if errors.Is(err, kes.ErrKeyNotFound) {
 				writeErrorResponse(ctx, w, toAPIError(ctx, errKMSKeyNotFound), r.URL)
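The hunk above replaces GenerateKey's positional arguments with a request struct. A minimal sketch of a key probe in the new style, using only names visible in the hunk; the wrapper function itself is hypothetical:

package cmd

import (
	"context"

	"github.com/minio/minio/internal/kms"
)

// probeKMSKey performs a test key operation to verify that the KMS key
// exists and is usable, mirroring the check in PutBucketEncryptionHandler.
func probeKMSKey(ctx context.Context, keyID string) error {
	kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"}
	_, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
		Name:           keyID,
		AssociatedData: kmsContext,
	})
	return err
}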
@@ -114,7 +114,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
 	// We encode the xml bytes as base64 to ensure there are no encoding
 	// errors.
 	cfgStr := base64.StdEncoding.EncodeToString(configData)
-	logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
+	replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
 		Type:      madmin.SRBucketMetaTypeSSEConfig,
 		Bucket:    bucket,
 		SSEConfig: &cfgStr,
@@ -203,7 +203,7 @@ func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter
 	}
 
 	// Call site replication hook.
-	logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
+	replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
 		Type:      madmin.SRBucketMetaTypeSSEConfig,
 		Bucket:    bucket,
 		SSEConfig: nil,
@@ -60,8 +60,8 @@ import (
 	"github.com/minio/minio/internal/ioutil"
 	"github.com/minio/minio/internal/kms"
 	"github.com/minio/minio/internal/logger"
-	"github.com/minio/pkg/v2/policy"
-	"github.com/minio/pkg/v2/sync/errgroup"
+	"github.com/minio/pkg/v3/policy"
+	"github.com/minio/pkg/v3/sync/errgroup"
 )
 
 const (
@@ -71,6 +71,8 @@ const (
 
 	xMinIOErrCodeHeader = "x-minio-error-code"
 	xMinIOErrDescHeader = "x-minio-error-desc"
+
+	postPolicyBucketTagging = "tagging"
 )
@@ -89,7 +91,7 @@ const (
 // Check if there are buckets on server without corresponding entry in etcd backend and
 // -- If IP of the entry doesn't match, this means entry is
 //
 // for another instance. Log an error to console.
-func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
+func initFederatorBackend(buckets []string, objLayer ObjectLayer) {
 	if len(buckets) == 0 {
 		return
 	}
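The hunk above narrows initFederatorBackend from []BucketInfo to plain bucket names. A hypothetical adapter for call sites that still hold []BucketInfo (whose Name field the old code accessed):

package cmd

// bucketNames derives the []string now expected by initFederatorBackend
// from a slice of BucketInfo values.
func bucketNames(infos []BucketInfo) []string {
	names := make([]string, 0, len(infos))
	for _, bi := range infos {
		names = append(names, bi.Name)
	}
	return names
}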
@@ -97,7 +99,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
 	// Get buckets in the DNS
 	dnsBuckets, err := globalDNSConfig.List()
 	if err != nil && !IsErrIgnored(err, dns.ErrNoEntriesFound, dns.ErrNotImplemented, dns.ErrDomainMissing) {
-		logger.LogIf(GlobalContext, err)
+		dnsLogIf(GlobalContext, err)
 		return
 	}
 
@@ -110,10 +112,10 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
 	domainMissing := err == dns.ErrDomainMissing
 	if dnsBuckets != nil {
 		for _, bucket := range buckets {
-			bucketsSet.Add(bucket.Name)
-			r, ok := dnsBuckets[bucket.Name]
+			bucketsSet.Add(bucket)
+			r, ok := dnsBuckets[bucket]
 			if !ok {
-				bucketsToBeUpdated.Add(bucket.Name)
+				bucketsToBeUpdated.Add(bucket)
 				continue
 			}
 			if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
@@ -132,7 +134,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
 			// but if we do see a difference with local domain IPs with
 			// hostSlice from etcd then we should update with newer
 			// domainIPs, we proceed to do that here.
-			bucketsToBeUpdated.Add(bucket.Name)
+			bucketsToBeUpdated.Add(bucket)
 			continue
 		}
 
@@ -141,7 +143,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
 			// bucket names are globally unique in federation at a given
 			// path prefix, name collision is not allowed. We simply log
 			// an error and continue.
-			bucketsInConflict.Add(bucket.Name)
+			bucketsInConflict.Add(bucket)
 		}
 	}
 
@@ -159,13 +161,13 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
 	ctx := GlobalContext
 	for _, err := range g.Wait() {
 		if err != nil {
-			logger.LogIf(ctx, err)
+			dnsLogIf(ctx, err)
 			return
 		}
 	}
 
 	for _, bucket := range bucketsInConflict.ToSlice() {
-		logger.LogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice()))
+		dnsLogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice()))
 	}
 
 	var wg sync.WaitGroup
@@ -186,7 +188,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
 			// We go to here, so we know the bucket no longer exists,
 			// but is registered in DNS to this server
 			if err := globalDNSConfig.Delete(bucket); err != nil {
-				logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
+				dnsLogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
 					bucket, err))
 			}
 		}(bucket)
@@ -226,7 +228,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
 	// Generate response.
 	encodedSuccessResponse := encodeResponse(LocationResponse{})
 	// Get current region.
-	region := globalSite.Region
+	region := globalSite.Region()
 	if region != globalMinioDefaultRegion {
 		encodedSuccessResponse = encodeResponse(LocationResponse{
 			Location: region,
@@ -423,7 +425,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 		return
 	}
 
-	// Content-Md5 is requied should be set
+	// Content-Md5 is required should be set
 	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
 	if _, ok := r.Header[xhttp.ContentMD5]; !ok {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL)
@@ -466,13 +468,6 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 	// Ignore errors here to preserve the S3 error behavior of GetBucketInfo()
 	checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, "")
 
-	// Before proceeding validate if bucket exists.
-	_, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{})
-	if err != nil {
-		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
-		return
-	}
-
 	deleteObjectsFn := objectAPI.DeleteObjects
 
 	// Return Malformed XML as S3 spec if the number of objects is empty
@@ -610,6 +605,12 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 		VersionSuspended: vc.Suspended(),
 	})
 
+	// Are all objects saying bucket not found?
+	if isAllBucketsNotFound(errs) {
+		writeErrorResponse(ctx, w, toAPIError(ctx, errs[0]), r.URL)
+		return
+	}
+
 	for i := range errs {
 		// DeleteMarkerVersionID is not used specifically to avoid
 		// lookup errors, since DeleteMarkerVersionID is only
@@ -617,7 +618,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 		// specify a versionID.
 		objToDel := ObjectToDelete{
 			ObjectV: ObjectV{
-				ObjectName: dObjects[i].ObjectName,
+				ObjectName: decodeDirObject(dObjects[i].ObjectName),
 				VersionID:  dObjects[i].VersionID,
 			},
 			VersionPurgeStatus: dObjects[i].VersionPurgeStatus(),
@@ -668,6 +669,8 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 			continue
 		}
 
+		defer globalCacheConfig.Delete(bucket, dobj.ObjectName)
+
 		if replicateDeletes && (dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == Pending) {
 			// copy so we can re-add null ID.
 			dobj := dobj
@@ -788,7 +791,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 
 	// check if client is attempting to create more buckets, complain about it.
 	if currBuckets := globalBucketMetadataSys.Count(); currBuckets+1 > maxBuckets {
-		logger.LogIf(ctx, fmt.Errorf("Please avoid creating more buckets %d beyond recommended %d", currBuckets+1, maxBuckets))
+		internalLogIf(ctx, fmt.Errorf("Please avoid creating more buckets %d beyond recommended %d", currBuckets+1, maxBuckets), logger.WarningKind)
 	}
 
 	opts := MakeBucketOptions{
@@ -869,7 +872,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 	globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)
 
 	// Call site replication hook
-	logger.LogIf(ctx, globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts))
+	replLogIf(ctx, globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts))
 
 	// Make sure to add Location information here only for bucket
 	w.Header().Set(xhttp.Location, pathJoin(SlashSeparator, bucket))
@@ -1192,7 +1195,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 	})
 
 	var opts ObjectOptions
-	opts, err = putOpts(ctx, r, bucket, object, metadata)
+	opts, err = putOptsFromReq(ctx, r, bucket, object, metadata)
 	if err != nil {
 		writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
 		return
@@ -1216,11 +1219,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		return
 	}
 
-	if crypto.SSEC.IsRequested(r.Header) && isReplicationEnabled(ctx, bucket) {
-		writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParametersSSEC), r.URL)
-		return
-	}
-
 	var (
 		reader io.Reader
 		keyID  string
@@ -1376,7 +1374,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		// Notify object created events.
 		sendEvent(eventArgsList[i])
 
-		if eventArgsList[i].Object.NumVersions > dataScannerExcessiveVersionsThreshold {
+		if eventArgsList[i].Object.NumVersions > int(scannerExcessObjectVersions.Load()) {
 			// Send events for excessive versions.
 			sendEvent(eventArgs{
 				EventName: event.ObjectManyVersions,
@@ -1387,22 +1385,46 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 				UserAgent: r.UserAgent() + " " + "MinIO-Fan-Out",
 				Host:      handlers.GetSourceIP(r),
 			})
+
+			auditLogInternal(context.Background(), AuditLogOptions{
+				Event:     "scanner:manyversions",
+				APIName:   "PostPolicyBucket",
+				Bucket:    eventArgsList[i].Object.Bucket,
+				Object:    eventArgsList[i].Object.Name,
+				VersionID: eventArgsList[i].Object.VersionID,
+				Status:    http.StatusText(http.StatusOK),
+			})
 		}
 	}
 
 	return
 }
 
+	if formValues.Get(postPolicyBucketTagging) != "" {
+		tags, err := tags.ParseObjectXML(strings.NewReader(formValues.Get(postPolicyBucketTagging)))
+		if err != nil {
+			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
+			return
+		}
+		tagsStr := tags.String()
+		opts.UserDefined[xhttp.AmzObjectTagging] = tagsStr
+	} else {
+		// avoid user set an invalid tag using `X-Amz-Tagging`
+		delete(opts.UserDefined, xhttp.AmzObjectTagging)
+	}
+
 	objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, opts)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
 
+	etag := getDecryptedETag(formValues, objInfo, false)
+
 	// We must not use the http.Header().Set method here because some (broken)
 	// clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive).
 	// Therefore, we have to set the ETag directly as map entry.
-	w.Header()[xhttp.ETag] = []string{`"` + objInfo.ETag + `"`}
+	w.Header()[xhttp.ETag] = []string{`"` + etag + `"`}
 
 	// Set the relevant version ID as part of the response header.
 	if objInfo.VersionID != "" && objInfo.VersionID != nullVersionID {
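The tagging block added above parses the POST-policy "tagging" form field as S3 Tagging XML. A hypothetical illustration of the accepted input, assuming the tags package here is minio-go's pkg/tags (the one exposing ParseObjectXML):

package cmd

import (
	"fmt"
	"strings"

	"github.com/minio/minio-go/v7/pkg/tags"
)

// parsePostPolicyTagging shows the shape of a valid "tagging" form value
// and the canonical string the handler stores under X-Amz-Tagging.
func parsePostPolicyTagging() {
	const taggingXML = `<Tagging><TagSet><Tag><Key>env</Key><Value>prod</Value></Tag></TagSet></Tagging>`
	t, err := tags.ParseObjectXML(strings.NewReader(taggingXML))
	if err != nil {
		// the handler maps a parse failure to ErrMalformedPOSTRequest
		fmt.Println("malformed tagging:", err)
		return
	}
	fmt.Println(t.String()) // "env=prod"
}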
@@ -1424,7 +1446,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		Host:      handlers.GetSourceIP(r),
 	})
 
-	if objInfo.NumVersions > dataScannerExcessiveVersionsThreshold {
+	if objInfo.NumVersions > int(scannerExcessObjectVersions.Load()) {
 		defer sendEvent(eventArgs{
 			EventName:  event.ObjectManyVersions,
 			BucketName: objInfo.Bucket,
|
||||
UserAgent: r.UserAgent(),
|
||||
Host: handlers.GetSourceIP(r),
|
||||
})
|
||||
|
||||
auditLogInternal(context.Background(), AuditLogOptions{
|
||||
Event: "scanner:manyversions",
|
||||
APIName: "PostPolicyBucket",
|
||||
Bucket: objInfo.Bucket,
|
||||
Object: objInfo.Name,
|
||||
VersionID: objInfo.VersionID,
|
||||
Status: http.StatusText(http.StatusOK),
|
||||
})
|
||||
}
|
||||
|
||||
if redirectURL != nil { // success_action_redirect is valid and set.
|
||||
@@ -1612,7 +1643,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
|
||||
return
|
||||
}
|
||||
case rcfg.HasActiveRules("", true):
|
||||
case rcfg != nil && rcfg.HasActiveRules("", true):
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -1620,9 +1651,11 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
 	}
 
 	// Return an error if the bucket does not exist
-	if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil && !forceDelete {
-		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
-		return
+	if !forceDelete {
+		if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
+			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
+			return
+		}
 	}
 
 	// Attempt to delete bucket.
@@ -1642,7 +1675,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
 
 	if globalDNSConfig != nil {
 		if err := globalDNSConfig.Delete(bucket); err != nil {
-			logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually, bucket on MinIO no longer exists", err))
+			dnsLogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually, bucket on MinIO no longer exists", err))
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 			return
 		}
@@ -1652,7 +1685,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
 	globalReplicationPool.deleteResyncMetadata(ctx, bucket)
 
 	// Call site replication hook.
-	logger.LogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete))
+	replLogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete))
 
 	// Write success response.
 	writeSuccessNoContent(w)
@@ -1725,7 +1758,7 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
 	// We encode the xml bytes as base64 to ensure there are no encoding
 	// errors.
 	cfgStr := base64.StdEncoding.EncodeToString(configData)
-	logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
+	replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
 		Type:             madmin.SRBucketMetaTypeObjectLockConfig,
 		Bucket:           bucket,
 		ObjectLockConfig: &cfgStr,
@@ -1829,7 +1862,7 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
 	// We encode the xml bytes as base64 to ensure there are no encoding
 	// errors.
 	cfgStr := base64.StdEncoding.EncodeToString(configData)
-	logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
+	replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
 		Type:   madmin.SRBucketMetaTypeTags,
 		Bucket: bucket,
 		Tags:   &cfgStr,
@@ -1905,7 +1938,7 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r
 		return
 	}
 
-	logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
+	replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
 		Type:      madmin.SRBucketMetaTypeTags,
 		Bucket:    bucket,
 		UpdatedAt: updatedAt,
@@ -32,7 +32,7 @@ import (
 
 // Wrapper for calling RemoveBucket HTTP handler tests for both Erasure multiple disks and single node setup.
 func TestRemoveBucketHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testRemoveBucketHandler, []string{"RemoveBucket"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testRemoveBucketHandler, endpoints: []string{"RemoveBucket"}})
 }
 
 func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -78,7 +78,7 @@ func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, a
 
 // Wrapper for calling GetBucketPolicy HTTP handler tests for both Erasure multiple disks and single node setup.
 func TestGetBucketLocationHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testGetBucketLocationHandler, []string{"GetBucketLocation"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testGetBucketLocationHandler, endpoints: []string{"GetBucketLocation"}})
 }
 
 func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -220,7 +220,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
 
 // Wrapper for calling HeadBucket HTTP handler tests for both Erasure multiple disks and single node setup.
 func TestHeadBucketHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testHeadBucketHandler, []string{"HeadBucket"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testHeadBucketHandler, endpoints: []string{"HeadBucket"}})
 }
 
 func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -252,7 +252,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
 		},
 		// Test case - 3.
 		// Testing for signature mismatch error.
-		// setting invalid acess and secret key.
+		// setting invalid access and secret key.
 		{
 			bucketName: bucketName,
 			accessKey:  "abcd",
@@ -322,7 +322,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
 
 // Wrapper for calling TestListMultipartUploadsHandler tests for both Erasure multiple disks and single node setup.
 func TestListMultipartUploadsHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testListMultipartUploadsHandler, []string{"ListMultipartUploads"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testListMultipartUploadsHandler, endpoints: []string{"ListMultipartUploads"}})
 }
 
 // testListMultipartUploadsHandler - Tests validate listing of multipart uploads.
@@ -415,7 +415,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
 			shouldPass: false,
 		},
 		// Test case - 6.
-		// Setting a negative value to max-uploads paramater, should result in http.StatusBadRequest.
+		// Setting a negative value to max-uploads parameter, should result in http.StatusBadRequest.
 		{
 			bucket: bucketName,
 			prefix: "",
@@ -558,7 +558,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
 
 // Wrapper for calling TestListBucketsHandler tests for both Erasure multiple disks and single node setup.
 func TestListBucketsHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testListBucketsHandler, []string{"ListBuckets"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testListBucketsHandler, endpoints: []string{"ListBuckets"}})
 }
 
 // testListBucketsHandler - Tests validate listing of buckets.
@@ -580,7 +580,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
 			expectedRespStatus: http.StatusOK,
 		},
 		// Test case - 2.
-		// Test case with invalid accessKey to produce and validate Signature MisMatch error.
+		// Test case with invalid accessKey to produce and validate Signature Mismatch error.
 		{
 			bucketName: bucketName,
 			accessKey:  "abcd",
@@ -649,7 +649,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
 
 // Wrapper for calling DeleteMultipleObjects HTTP handler tests for both Erasure multiple disks and single node setup.
 func TestAPIDeleteMultipleObjectsHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects", "PutBucketPolicy"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testAPIDeleteMultipleObjectsHandler, endpoints: []string{"DeleteMultipleObjects", "PutBucketPolicy"}})
 }
 
 func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -880,6 +880,15 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
 			expectedContent:    encodedAnonResponseWithPartialPublicAccess,
 			expectedRespStatus: http.StatusOK,
 		},
+		// Test case - 7.
+		// Bucket does not exist.
+		7: {
+			bucket:             "unknown-bucket-name",
+			objects:            successRequest0,
+			accessKey:          credentials.AccessKey,
+			secretKey:          credentials.SecretKey,
+			expectedRespStatus: http.StatusNotFound,
+		},
 	}
 
 	for i, testCase := range testCases {
@@ -888,13 +897,12 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
 
 		// Generate a signed or anonymous request based on the testCase
 		if testCase.accessKey != "" {
-			req, err = newTestSignedRequestV4(http.MethodPost, getDeleteMultipleObjectsURL("", bucketName),
+			req, err = newTestSignedRequestV4(http.MethodPost, getDeleteMultipleObjectsURL("", testCase.bucket),
 				int64(len(testCase.objects)), bytes.NewReader(testCase.objects), testCase.accessKey, testCase.secretKey, nil)
 		} else {
-			req, err = newTestRequest(http.MethodPost, getDeleteMultipleObjectsURL("", bucketName),
+			req, err = newTestRequest(http.MethodPost, getDeleteMultipleObjectsURL("", testCase.bucket),
 				int64(len(testCase.objects)), bytes.NewReader(testCase.objects))
 		}
 
 		if err != nil {
 			t.Fatalf("Failed to create HTTP request for DeleteMultipleObjects: <ERROR> %v", err)
 		}
@@ -25,6 +25,7 @@ type lcEventSrc uint8
 //revive:disable:var-naming Underscores is used here to indicate where common prefix ends and the enumeration name begins
 const (
 	lcEventSrc_None lcEventSrc = iota
+	lcEventSrc_Heal
 	lcEventSrc_Scanner
 	lcEventSrc_Decom
 	lcEventSrc_Rebal
@@ -22,12 +22,13 @@ import (
 	"io"
 	"net/http"
 	"strconv"
+	"time"
 
 	"github.com/minio/minio/internal/bucket/lifecycle"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/mux"
-	"github.com/minio/pkg/v2/policy"
+	"github.com/minio/pkg/v3/policy"
 )
 
 const (
@@ -63,7 +64,8 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
 	}
 
 	// Check if bucket exists.
-	if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
+	rcfg, err := globalBucketObjectLockSys.Get(bucket)
+	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
@@ -75,7 +77,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
 	}
 
 	// Validate the received bucket policy document
-	if err = bucketLifecycle.Validate(); err != nil {
+	if err = bucketLifecycle.Validate(rcfg); err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
@@ -86,6 +88,41 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
 		return
 	}
 
+	// Create a map of updated set of rules in request
+	updatedRules := make(map[string]lifecycle.Rule, len(bucketLifecycle.Rules))
+	for _, rule := range bucketLifecycle.Rules {
+		updatedRules[rule.ID] = rule
+	}
+
+	// Get list of rules for the bucket from disk
+	meta, err := globalBucketMetadataSys.GetConfigFromDisk(ctx, bucket)
+	if err != nil {
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
+		return
+	}
+	expiryRuleRemoved := false
+	if len(meta.LifecycleConfigXML) > 0 {
+		var lcCfg lifecycle.Lifecycle
+		if err := xml.Unmarshal(meta.LifecycleConfigXML, &lcCfg); err != nil {
+			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
+			return
+		}
+		for _, rl := range lcCfg.Rules {
+			updRule, ok := updatedRules[rl.ID]
+			// original rule had expiry that is no longer in the new config,
+			// or rule is present but missing expiration flags
+			if (!rl.Expiration.IsNull() || !rl.NoncurrentVersionExpiration.IsNull()) &&
+				(!ok || (updRule.Expiration.IsNull() && updRule.NoncurrentVersionExpiration.IsNull())) {
+				expiryRuleRemoved = true
+			}
+		}
+	}
+
+	if bucketLifecycle.HasExpiry() || expiryRuleRemoved {
+		currtime := time.Now()
+		bucketLifecycle.ExpiryUpdatedAt = &currtime
+	}
+
 	configData, err := xml.Marshal(bucketLifecycle)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
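A hypothetical distillation of the expiry-removal check added above, using the same lifecycle predicates; the standalone helper is for illustration only:

package cmd

import "github.com/minio/minio/internal/bucket/lifecycle"

// expiryRemoved reports whether an old rule carried expiration that the
// updated config drops, either by removing the rule or by clearing both
// of its expiration flags. PutBucketLifecycleHandler bumps ExpiryUpdatedAt
// when any rule satisfies this.
func expiryRemoved(old, upd lifecycle.Rule, stillPresent bool) bool {
	hadExpiry := !old.Expiration.IsNull() || !old.NoncurrentVersionExpiration.IsNull()
	lostExpiry := !stillPresent || (upd.Expiration.IsNull() && upd.NoncurrentVersionExpiration.IsNull())
	return hadExpiry && lostExpiry
}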
@@ -142,6 +179,8 @@ func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
+	// explicitly set ExpiryUpdatedAt nil as its meant for internal consumption only
+	config.ExpiryUpdatedAt = nil
 
 	configData, err := xml.Marshal(config)
 	if err != nil {
Some files were not shown because too many files have changed in this diff.