Compare commits: RELEASE.20 ... RELEASE.20 (955 commits)
(Commit list omitted: 955 commits, 7bc95c47a3 through 80039f60d5. The source captured only each commit's abbreviated SHA1; the Author, Date, and message columns are empty.)
.github/PULL_REQUEST_TEMPLATE.md (3 changes)

```diff
@@ -15,5 +15,6 @@
 ## Checklist:
 - [ ] Fixes a regression (If yes, please add `commit-id` or `PR #` here)
 - [ ] Documentation updated
 - [ ] Unit tests added/updated
 - [ ] Internal documentation updated
+- [ ] Create a documentation update request [here](https://github.com/minio/docs/issues/new?label=doc-change,title=Doc+Updated+Needed+For+PR+github.com%2fminio%2fminio%2fpull%2fNNNNN)
```
.github/markdown-lint-cfg.yaml (new file, 5 lines)

```diff
@@ -0,0 +1,5 @@
+# Config file for markdownlint-cli
+MD033:
+  allowed_elements:
+    - details
+    - summary
```
.github/workflows/depsreview.yaml (new file, 14 lines)

```diff
@@ -0,0 +1,14 @@
+name: 'Dependency Review'
+on: [pull_request]
+
+permissions:
+  contents: read
+
+jobs:
+  dependency-review:
+    runs-on: ubuntu-latest
+    steps:
+      - name: 'Checkout Repository'
+        uses: actions/checkout@v3
+      - name: 'Dependency Review'
+        uses: actions/dependency-review-action@v1
```
.github/workflows/enterprise-operator.yaml (new file, 25 lines)

```diff
@@ -0,0 +1,25 @@
+name: Pull Request Action
+on: [release]
+jobs:
+  create-pull-request:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Create PR
+        run: |
+          echo ""
+          echo "####################"
+          echo "# Branch Name:"
+          echo "####################"
+          echo ""
+          branchName=${{github.ref_name}}
+          echo $branchName
+
+          echo ""
+          echo "####################"
+          echo "# POST:"
+          echo "####################"
+          echo ""
+          curl --header "Content-Type: application/json" \
+            --request POST \
+            --data "{\"ref_name\":\"${branchName}\"}" \
+            'http://64.71.151.78:23411/' || true
```
.github/workflows/go-cross.yml (10 changes)

```diff
@@ -11,19 +11,23 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
   build:
     name: Build Tests with Go ${{ matrix.go-version }} on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.17.x]
+        go-version: [1.19.x]
         os: [ubuntu-latest]
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-go@v2
+      - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
+      - uses: actions/setup-go@bfdd3570ce990073878bf10f6b2d79082de49492 # v2
        with:
          go-version: ${{ matrix.go-version }}
+          check-latest: true
      - name: Build on ${{ matrix.os }}
        if: matrix.os == 'ubuntu-latest'
        env:
```
.github/workflows/go-fips.yml (new file, 52 lines)

```diff
@@ -0,0 +1,52 @@
+name: FIPS Build Test
+
+on:
+  pull_request:
+    branches:
+      - master
+
+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
+jobs:
+  build:
+    name: Go BoringCrypto ${{ matrix.go-version }} on ${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        go-version: [1.19.x]
+        os: [ubuntu-latest]
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Setup dockerfile for build test
+        run: |
+          echo "FROM golang:1.19.4" >> Dockerfile.fips.test
+          echo "COPY . /minio" >> Dockerfile.fips.test
+          echo "WORKDIR /minio" >> Dockerfile.fips.test
+          echo "ENV GOEXPERIMENT=boringcrypto" >> Dockerfile.fips.test
+          echo "RUN make" >> Dockerfile.fips.test
+
+      - name: Build
+        uses: docker/build-push-action@v3
+        with:
+          context: .
+          file: Dockerfile.fips.test
+          push: false
+          load: true
+          tags: minio/fips-test:latest
+
+      # This should fail if grep returns non-zero exit
+      - name: Test binary
+        run: |
+          docker run --rm minio/fips-test:latest ./minio --version
+          docker run --rm -i minio/fips-test:latest /bin/bash -c 'go tool nm ./minio' | grep -q FIPS
```
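The check this workflow performs can be reproduced locally. A minimal sketch, assuming Docker is available and the MinIO repository root is the working directory; the Dockerfile contents and image tag simply mirror the workflow steps above:

```sh
# Recreate the throwaway Dockerfile the workflow writes out line by line.
cat > Dockerfile.fips.test <<'EOF'
FROM golang:1.19.4
COPY . /minio
WORKDIR /minio
ENV GOEXPERIMENT=boringcrypto
RUN make
EOF

# Build the image, then confirm the binary runs and links BoringCrypto:
# `go tool nm` lists the symbol table, and a FIPS build exposes FIPS symbols.
docker build -f Dockerfile.fips.test -t minio/fips-test:latest .
docker run --rm minio/fips-test:latest ./minio --version
docker run --rm -i minio/fips-test:latest /bin/bash -c 'go tool nm ./minio' | grep -q FIPS \
  && echo "FIPS symbols present"
```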
.github/workflows/go-healing.yml (19 changes)

```diff
@@ -11,27 +11,23 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
   build:
     name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.17.x]
+        go-version: [1.19.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go-version }}
-      - uses: actions/cache@v2
-        with:
-          path: |
-            ~/.cache/go-build
-            ~/go/pkg/mod
-          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-${{ matrix.go-version }}-go-
+          check-latest: true
      - name: Build on ${{ matrix.os }}
        if: matrix.os == 'ubuntu-latest'
        env:
@@ -43,3 +39,6 @@ jobs:
         sudo sysctl net.ipv6.conf.all.disable_ipv6=0
         sudo sysctl net.ipv6.conf.default.disable_ipv6=0
         make verify-healing
+        make verify-healing-inconsistent-versions
+        make verify-healing-with-root-disks
+        make verify-healing-with-rewrite
```
.github/workflows/go-lint.yml (30 changes)

```diff
@@ -11,43 +11,30 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
   build:
     name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.17.x]
+        go-version: [1.19.x]
         os: [ubuntu-latest, windows-latest]
     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go-version }}
-      - uses: actions/cache@v2
-        if: matrix.os == 'ubuntu-latest'
-        with:
-          path: |
-            ~/.cache/go-build
-            ~/go/pkg/mod
-          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-${{ matrix.go-version }}-go-
-      - uses: actions/cache@v2
-        if: matrix.os == 'windows-latest'
-        with:
-          path: |
-            %LocalAppData%\go-build
-            ~/go/pkg/mod
-          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-${{ matrix.go-version }}-go-
+          check-latest: true
      - name: Build on ${{ matrix.os }}
        if: matrix.os == 'windows-latest'
        env:
          CGO_ENABLED: 0
          GO111MODULE: on
        run: |
+          netsh int ipv4 set dynamicport tcp start=60000 num=61000
          go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
          go test -v --timeout 50m ./...
      - name: Build on ${{ matrix.os }}
@@ -59,9 +46,6 @@ jobs:
           sudo apt install jq -y
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
-          nancy_version=$(curl --retry 10 -Ls -o /dev/null -w "%{url_effective}" https://github.com/sonatype-nexus-community/nancy/releases/latest | sed "s/https:\/\/github.com\/sonatype-nexus-community\/nancy\/releases\/tag\///")
-          curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-${nancy_version}-linux-amd64 && chmod +x nancy
-          go list -deps -json ./... | jq -s 'unique_by(.Module.Path)|.[]|select(has("Module"))|.Module' | ./nancy sleuth
           make
           make test
           make test-race
```
.github/workflows/go.yml (21 changes)

```diff
@@ -11,36 +11,29 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
   build:
     name: Go ${{ matrix.go-version }} on ${{ matrix.os }} - healing
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.17.x]
+        go-version: [1.19.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go-version }}
-      - uses: actions/cache@v2
-        with:
-          path: |
-            ~/.cache/go-build
-            ~/go/pkg/mod
-          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-${{ matrix.go-version }}-go-
+          check-latest: true
      - name: Build on ${{ matrix.os }}
        if: matrix.os == 'ubuntu-latest'
        env:
          CGO_ENABLED: 0
          GO111MODULE: on
-          MINIO_KMS_KES_CERT_FILE: /home/runner/work/minio/minio/.github/workflows/root.cert
-          MINIO_KMS_KES_KEY_FILE: /home/runner/work/minio/minio/.github/workflows/root.key
-          MINIO_KMS_KES_ENDPOINT: "https://play.min.io:7373"
-          MINIO_KMS_KES_KEY_NAME: "my-minio-key"
+          MINIO_KMS_SECRET_KEY: "my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw="
          MINIO_KMS_AUTO_ENCRYPTION: on
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
```
.github/workflows/iam-integrations.yaml (46 changes)

```diff
@@ -11,6 +11,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
   iam-matrix-test:
     name: "[Go=${{ matrix.go-version }}|ldap=${{ matrix.ldap }}|etcd=${{ matrix.etcd }}|openid=${{ matrix.openid }}]"
@@ -44,13 +47,21 @@ jobs:
           - "5556:5556"
         env:
           DEX_LDAP_SERVER: "openldap:389"
+      openid2:
+        image: quay.io/minio/dex
+        ports:
+          - "5557:5557"
+        env:
+          DEX_LDAP_SERVER: "openldap:389"
+          DEX_ISSUER: "http://127.0.0.1:5557/dex"
+          DEX_WEB_HTTP: "0.0.0.0:5557"
 
     strategy:
       # When ldap, etcd or openid vars are empty below, those external servers
       # are turned off - i.e. if ldap="", then ldap server is not enabled for
       # the tests.
       matrix:
-        go-version: [1.17.x]
+        go-version: [1.19.x]
         ldap: ["", "localhost:389"]
         etcd: ["", "http://localhost:2379"]
         openid: ["", "http://127.0.0.1:5556/dex"]
@@ -65,17 +76,10 @@ jobs:
 
     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go-version }}
-      - uses: actions/cache@v2
-        with:
-          path: |
-            ~/.cache/go-build
-            ~/go/pkg/mod
-          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-${{ matrix.go-version }}-go-
+          check-latest: true
      - name: Test LDAP/OpenID/Etcd combo
        env:
          LDAP_TEST_SERVER: ${{ matrix.ldap }}
@@ -85,6 +89,28 @@ jobs:
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make test-iam
+      - name: Test with multiple OpenID providers
+        if: matrix.openid == 'http://127.0.0.1:5556/dex'
+        env:
+          LDAP_TEST_SERVER: ${{ matrix.ldap }}
+          ETCD_SERVER: ${{ matrix.etcd }}
+          OPENID_TEST_SERVER: ${{ matrix.openid }}
+          OPENID_TEST_SERVER_2: "http://127.0.0.1:5557/dex"
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-iam
+      - name: Test with Access Management Plugin enabled
+        env:
+          LDAP_TEST_SERVER: ${{ matrix.ldap }}
+          ETCD_SERVER: ${{ matrix.etcd }}
+          OPENID_TEST_SERVER: ${{ matrix.openid }}
+          POLICY_PLUGIN_ENDPOINT: "http://127.0.0.1:8080"
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          go run docs/iam/access-manager-plugin.go &
+          make test-iam
       - name: Test LDAP for automatic site replication
         if: matrix.ldap == 'localhost:389'
         run: |
```
.github/workflows/markdown-lint.yaml (7 changes)

```diff
@@ -11,6 +11,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
   lint:
     name: Lint all docs
@@ -22,4 +25,6 @@ jobs:
       - name: Lint all docs
         run: |
           npm install -g markdownlint-cli
-          markdownlint --fix '**/*.md' --disable MD013 MD040
+          markdownlint --fix '**/*.md' \
+            --config /home/runner/work/minio/minio/.github/markdown-lint-cfg.yaml \
+            --disable MD013 MD040 MD051
```
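The same lint run can be reproduced outside CI. A minimal sketch, assuming Node.js is installed and the repository root is the working directory; the only adaptation is replacing the runner's absolute config path with the relative one:

```sh
# Install the CLI, then lint with the repo's config (which allows the
# <details>/<summary> HTML elements) while disabling the same rules as CI.
npm install -g markdownlint-cli
markdownlint --fix '**/*.md' \
  --config .github/markdown-lint-cfg.yaml \
  --disable MD013 MD040 MD051
```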
.github/workflows/replication.yaml (27 changes)

```diff
@@ -1,4 +1,4 @@
-name: Multi-site replication tests
+name: MinIO advanced tests
 
 on:
   pull_request:
@@ -11,33 +11,36 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
   replication-test:
-    name: Replication Tests with Go ${{ matrix.go-version }}
+    name: Advanced Tests with Go ${{ matrix.go-version }}
     runs-on: ubuntu-latest
 
     strategy:
       matrix:
-        go-version: [1.17.x]
+        go-version: [1.19.x]
 
     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go-version }}
-      - uses: actions/cache@v2
-        with:
-          path: |
-            ~/.cache/go-build
-            ~/go/pkg/mod
-          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-${{ matrix.go-version }}-go-
+          check-latest: true
+      - name: Test Decom
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-decom
+
       - name: Test Replication
         run: |
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make test-replication
 
       - name: Test MinIO IDP for automatic site replication
         run: |
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
```
.github/workflows/upgrade-ci-cd.yaml (9 changes)

```diff
@@ -11,21 +11,24 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
   build:
     name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.17.x]
+        go-version: [1.19.x]
         os: [ubuntu-latest]
 
     steps:
       - uses: actions/checkout@v1
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go-version }}
-
+          check-latest: true
      - name: Start upgrade tests
        run: |
          make test-upgrade
```
.github/workflows/vulncheck.yml (new file, 26 lines)

```diff
@@ -0,0 +1,26 @@
+name: VulnCheck
+on:
+  pull_request:
+    branches:
+      - master
+  push:
+    branches:
+      - master
+jobs:
+  vulncheck:
+    name: Analysis
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check out code into the Go module directory
+        uses: actions/checkout@v3
+      - name: Set up Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: 1.19
+          check-latest: true
+      - name: Get official govulncheck
+        run: go install golang.org/x/vuln/cmd/govulncheck@latest
+        shell: bash
+      - name: Run govulncheck
+        run: govulncheck ./...
+        shell: bash
```
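The scan is just as easy to run locally before opening a pull request. A minimal sketch, assuming a Go 1.19+ toolchain with `$GOPATH/bin` on `PATH`:

```sh
# Install and run the official vulnerability checker against all packages;
# it reports known vulnerabilities that are reachable from the code.
go install golang.org/x/vuln/cmd/govulncheck@latest
govulncheck ./...
```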
.gitignore (13 changes)

```diff
@@ -9,8 +9,7 @@ site/
 /.idea/
 /Minio.iml
 **/access.log
-vendor/**/*.js
-vendor/**/*.json
+vendor/
 .DS_Store
 *.syso
 coverage.txt
@@ -26,10 +25,16 @@ mc.*
 s3-check-md5*
 xl-meta*
 healing-*
-inspect*
+inspect*.zip
 200M*
 hash-set
 minio.RELEASE*
 mc
 nancy
-inspects/*
+inspects/*
+docs/debugging/s3-verify/s3-verify
+docs/debugging/xl-meta/xl-meta
+docs/debugging/s3-check-md5/s3-check-md5
+docs/debugging/hash-set/hash-set
+docs/debugging/healing-bin/healing-bin
+docs/debugging/inspect/inspect
```
.golangci.yml (file name inferred from content; change count not captured)

```diff
@@ -1,6 +1,6 @@
 linters-settings:
-  golint:
-    min-confidence: 0
+  gofumpt:
+    lang-version: "1.18"
 
   misspell:
     locale: US
@@ -14,25 +14,14 @@ linters:
   - govet
   - revive
   - ineffassign
   - gosimple
-  - deadcode
-  - structcheck
   - gomodguard
   - gofmt
   - unused
-  - structcheck
   - unconvert
-  - varcheck
-  - unused
   - gocritic
   - gofumpt
-
-linters-settings:
-  gofumpt:
-    lang-version: "1.17"
-
-  # Choose whether or not to use the extra rules that are disabled
-  # by default
-  extra-rules: false
+  - tenv
+  - durationcheck
 
 issues:
   exclude-use-default: false
```
Deleted CVE ignore list (likely .nancy-ignore; the nancy scanner was dropped from go-lint.yml above)

```diff
@@ -1,4 +0,0 @@
-CVE-2020-26160
-CVE-2020-15136
-CVE-2020-15115
-CVE-2020-15114
```
Dockerfile updates (three files; exact file names were not captured in extraction)

```diff
@@ -1,4 +1,4 @@
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.5
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.7
 
 ARG RELEASE
 
@@ -1,4 +1,4 @@
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.5
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.7
 
 ARG TARGETARCH
 
@@ -29,13 +29,15 @@ COPY LICENSE /licenses/LICENSE
 RUN \
     microdnf clean all && \
     microdnf update --nodocs && \
-    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
+    microdnf install curl ca-certificates shadow-utils util-linux gzip --nodocs && \
     rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
     microdnf install minisign --nodocs && \
     mkdir -p /opt/bin && chmod -R 777 /opt/bin && \
     curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /opt/bin/minio && \
     curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /opt/bin/minio.sha256sum && \
     curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /opt/bin/minio.minisig && \
+    curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /opt/bin/mc && \
+    gzip /opt/bin/mc && \
     microdnf clean all && \
     chmod +x /opt/bin/minio && \
     chmod +x /usr/bin/docker-entrypoint.sh && \
@@ -1,4 +1,4 @@
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.5
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.7
 
 ARG TARGETARCH
 
```
Makefile (45 changes)

```diff
@@ -15,13 +15,14 @@ checks: ## check dependencies
 	@(env bash $(PWD)/buildscripts/checkdeps.sh)
 
 help: ## print this help
-	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' Makefile | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
+	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' Makefile | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-40s\033[0m %s\n", $$1, $$2}'
 
 getdeps: ## fetch necessary dependencies
 	@mkdir -p ${GOPATH}/bin
-	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.43.0
-	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.7-0.20211026165309-e818a1881b0e
+	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin
+	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.7
 	@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest
+	@echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest
 
 crosscompile: ## cross compile minio
 	@(env bash $(PWD)/buildscripts/cross-compile.sh)
@@ -35,11 +36,19 @@ check-gen: ## check for updated autogenerated files
 lint: ## runs golangci-lint suite of linters
 	@echo "Running $@ check"
 	@${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml
+	@${GOPATH}/bin/staticcheck --tests=false ./...
 
 check: test
 test: verifiers build ## builds minio, runs linters, tests
 	@echo "Running unit tests"
-	@CGO_ENABLED=0 go test -tags kqueue ./...
+	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -tags kqueue ./...
+
+test-decom: install
+	@echo "Running minio decom tests"
+	@env bash $(PWD)/docs/distributed/decom.sh
+	@env bash $(PWD)/docs/distributed/decom-encrypted.sh
+	@env bash $(PWD)/docs/distributed/decom-encrypted-sse-s3.sh
+	@env bash $(PWD)/docs/distributed/decom-compressed-sse-s3.sh
 
 test-upgrade: build
 	@echo "Running minio upgrade tests"
@@ -51,13 +60,15 @@ test-race: verifiers build ## builds minio, runs linters, tests (race)
 
 test-iam: build ## verify IAM (external IDP, etcd backends)
 	@echo "Running tests for IAM (external IDP, etcd backends)"
-	@CGO_ENABLED=0 go test -tags kqueue -v -run TestIAM* ./cmd
+	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -tags kqueue -v -run TestIAM* ./cmd
 	@echo "Running tests for IAM (external IDP, etcd backends) with -race"
-	@CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd
+	@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd
 
 test-replication: install ## verify multi site replication
 	@echo "Running tests for replicating three sites"
 	@(env bash $(PWD)/docs/bucket/replication/setup_3site_replication.sh)
+	@(env bash $(PWD)/docs/bucket/replication/setup_2site_existing_replication.sh)
+	@(env bash $(PWD)/docs/bucket/replication/delete-replication.sh)
 
 test-site-replication-ldap: install ## verify automatic site replication
 	@echo "Running tests for automatic site replication of IAM (with LDAP)"
@@ -73,14 +84,30 @@ test-site-replication-minio: install ## verify automatic site replication
 
 verify: ## verify minio various setups
 	@echo "Verifying build with race"
-	@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-build.sh)
 
 verify-healing: ## verify healing and replacing disks with minio binary
 	@echo "Verify healing build with race"
-	@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-healing.sh)
 	@(env bash $(PWD)/buildscripts/unaligned-healing.sh)
+	@(env bash $(PWD)/buildscripts/heal-inconsistent-versions.sh)
+
+verify-healing-with-root-disks: ## verify healing root disks
+	@echo "Verify healing with root drives"
+	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@(env bash $(PWD)/buildscripts/verify-healing-with-root-disks.sh)
+
+verify-healing-with-rewrite: ## verify healing to rewrite old xl.meta -> new xl.meta
+	@echo "Verify healing with rewrite"
+	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@(env bash $(PWD)/buildscripts/rewrite-old-new.sh)
+
+verify-healing-inconsistent-versions: ## verify resolving inconsistent versions
+	@echo "Verify resolving inconsistent versions build with race"
+	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@(env bash $(PWD)/buildscripts/resolve-right-versions.sh)
 
 build: checks ## builds minio to $(PWD)
 	@echo "Building minio binary to './minio'"
@@ -122,6 +149,8 @@ clean: ## cleanup all generated assets
 	@echo "Cleaning up all the generated files"
 	@find . -name '*.test' | xargs rm -fv
 	@find . -name '*~' | xargs rm -fv
+	@find . -name '.#*#' | xargs rm -fv
+	@find . -name '#*#' | xargs rm -fv
 	@rm -rvf minio
 	@rm -rvf build
 	@rm -rvf release
```
README.fips.md (new file, 7 lines)

```diff
@@ -0,0 +1,7 @@
+# MinIO FIPS Builds
+
+MinIO creates FIPS builds using a patched version of the Go compiler (that uses BoringCrypto, from BoringSSL, which is [FIPS 140-2 validated](https://csrc.nist.gov/csrc/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp2964.pdf)) published by the Golang Team [here](https://github.com/golang/go/tree/dev.boringcrypto/misc/boring).
+
+MinIO FIPS executables are available at <http://dl.min.io> - they are only published for `linux-amd64` architecture as binary files with the suffix `.fips`. We also publish corresponding container images to our official image repositories.
+
+We are not making any statements or representations about the suitability of this code or build in relation to the FIPS 140-2 standard. Interested users will have to evaluate for themselves whether this is useful for their own purposes.
```
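A quick way to confirm that a downloaded binary really is a BoringCrypto build is to inspect its symbol table, the same check the FIPS CI workflow above relies on. A minimal sketch, assuming a Go toolchain is installed; the download URL follows the usual dl.min.io layout but is an assumption here, not a documented path:

```sh
# Hypothetical download path; adjust to the actual release artifact name.
curl -O https://dl.min.io/server/minio/release/linux-amd64/minio.fips
chmod +x minio.fips

# A FIPS build's symbol table contains BoringCrypto/FIPS symbols.
go tool nm ./minio.fips | grep -q FIPS && echo "FIPS symbols present"
```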
56
README.md
56
README.md
@@ -14,7 +14,7 @@ Use the following commands to run a standalone MinIO server as a container.
|
||||
|
||||
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
|
||||
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
|
||||
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
|
||||
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)
|
||||
for more complete documentation.
|
||||
|
||||
### Stable
|
||||
@@ -32,7 +32,7 @@ root credentials. You can use the Browser to create buckets, upload objects, and
|
||||
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
|
||||
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
|
||||
see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
|
||||
see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
|
||||
|
||||
> NOTE: To deploy MinIO on with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the container.
|
||||
|
||||
@@ -40,7 +40,7 @@ see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to vi
|
||||
|
||||
Use the following commands to run a standalone MinIO server on macOS.
|
||||
|
||||
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.
|
||||
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
|
||||
|
||||
### Homebrew (recommended)
|
||||
|
||||
@@ -60,7 +60,7 @@ brew install minio/stable/minio
|
||||
|
||||
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
|
||||
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html/> to view MinIO SDKs for supported languages.
|
||||
|
||||
### Binary Download
|
||||
|
||||
@@ -74,7 +74,7 @@ chmod +x minio
|
||||
|
||||
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
|
||||
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
|
||||
|
||||
## GNU/Linux
|
||||
|
||||
@@ -99,9 +99,9 @@ The following table lists supported architectures. Replace the `wget` URL with t
|
||||
|
||||
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
|
||||
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
|
||||
|
||||
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.
|
||||
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
|
||||
|
||||
## Microsoft Windows
|
||||
|
||||
@@ -119,23 +119,23 @@ minio.exe server D:\
|
||||
|
||||
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
|
||||
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/docs/> and click **MinIO SDKs** in the navigation to view MinIO SDKs for supported languages.
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
|
||||
|
||||
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html) for more complete documentation.
|
||||
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
|
||||
|

## Install from Source

Use the following commands to compile and run a standalone MinIO server from source. Source installation is intended only for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). The minimum version required is [go1.19](https://golang.org/dl/#stable).

```sh
go install github.com/minio/minio@latest
```
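As a quick sanity check after compiling, you can start the freshly built binary against a scratch directory; the paths below assume the default Go install location:

```sh
# `go install` places the binary under $(go env GOPATH)/bin
export PATH="$PATH:$(go env GOPATH)/bin"
mkdir -p /tmp/minio-data
minio server /tmp/minio-data
```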
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See the [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.

MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.

@@ -196,12 +196,6 @@ iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT

service iptables restart
```

## Pre-existing data

When deployed on a single drive, MinIO server lets clients access any pre-existing data in the data directory. For example, if MinIO is started with the command `minio server /mnt/data`, any pre-existing data in the `/mnt/data` directory would be accessible to the clients.

The above statement is also valid for all gateway backends.
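As a minimal illustration of this behavior (the directory and file names here are arbitrary examples):

```sh
# files placed in the data directory outside of MinIO...
mkdir -p /mnt/data/mybucket
cp /etc/hosts /mnt/data/mybucket/hosts

# ...appear as bucket 'mybucket' with object 'hosts' once the server starts
minio server /mnt/data
```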
## Test MinIO Connectivity

### Test using MinIO Console

@@ -228,7 +222,7 @@ For example: `export MINIO_SERVER_URL="https://minio.example.net"`

## Test using MinIO Client `mc`

`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff, etc. It supports filesystems and Amazon S3-compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart) for further instructions.
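For a quick end-to-end check (the alias name, bucket, and file below are placeholders; the credentials are the development defaults used earlier in this guide):

```sh
mc alias set myminio http://127.0.0.1:9000 minioadmin minioadmin
mc mb myminio/mybucket
mc cp /etc/hosts myminio/mybucket/hosts
mc ls myminio/mybucket
```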

## Upgrading MinIO

@@ -236,32 +230,30 @@ Upgrades require zero downtime in MinIO, all upgrades are non-disruptive, all tr

> NOTE: Upgrading directly from <https://dl.min.io> requires internet access; optionally, you can host your own mirrors at <https://my-artifactory.example.com/minio/>.

- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://min.io/docs/minio/linux/reference/minio-mc-admin/mc-admin-update.html):

```sh
mc admin update <minio alias, e.g., myminio>
```

- For deployments without external internet access (e.g. air-gapped environments), download the binary from <https://dl.min.io> and replace the existing MinIO binary, for example at `/opt/bin/minio`; apply executable permissions with `chmod +x /opt/bin/minio`, then proceed to perform `mc admin service restart alias/`.

- For installations using the Systemd MinIO service, upgrade via RPM/DEB packages **in parallel** on all servers, or replace the binary (say `/opt/bin/minio`) on all nodes, apply executable permissions with `chmod +x /opt/bin/minio`, and proceed to perform `mc admin service restart alias/`.

### Upgrade Checklist

- Test all upgrades in a lower environment (DEV, QA, UAT) before applying to production. Performing blind upgrades in production environments carries significant risk.
- Read the release notes for MinIO *before* performing any upgrade; there is no forced requirement to upgrade to the latest release with every release. Some releases may not be relevant to your setup; avoid upgrading production environments unnecessarily.
- If you plan to use `mc admin update`, the MinIO process must have write access to the parent directory where the binary is present on the host system.
- `mc admin update` is not supported and should be avoided in Kubernetes/container environments; instead, upgrade containers by upgrading the relevant container images.
- **We do not recommend upgrading one MinIO server at a time; the product is designed to support parallel upgrades. Please follow our recommended guidelines (a minimal sketch of a parallel restart follows below).**
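A minimal sketch of such a parallel restart, assuming a Systemd-managed deployment and passwordless SSH; the hostnames are placeholders:

```sh
# restart every node at roughly the same time rather than one by one
for host in minio1 minio2 minio3 minio4; do
  ssh "$host" 'sudo systemctl restart minio' &
done
wait
```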
## Explore Further

- [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)
- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html)
- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html)
- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html)

## Contribute to MinIO Project

BIN	buildscripts/cicd-corpus/disk2/bucket/testobj/xl.meta	(new file; binary not shown)
BIN	buildscripts/cicd-corpus/disk3/bucket/testobj/xl.meta	(new file; binary not shown)
BIN	buildscripts/cicd-corpus/disk4/bucket/testobj/xl.meta	(new file; binary not shown)
BIN	buildscripts/cicd-corpus/disk5/bucket/testobj/xl.meta	(new file; binary not shown)
@@ -9,7 +9,7 @@ function _init() {
	export CGO_ENABLED=0

	## List of architectures and OS to test cross compilation.
	SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64"
}

function _build() {
@@ -24,14 +24,18 @@ import (
	"fmt"
	"os"
	"os/exec"
	"strconv"
	"strings"
	"time"
)

func genLDFlags(version string) string {
	releaseTag, date := releaseTag(version)
	copyrightYear := strconv.Itoa(date.Year())
	ldflagsStr := "-s -w"
	ldflagsStr += " -X github.com/minio/minio/cmd.Version=" + version
	ldflagsStr += " -X github.com/minio/minio/cmd.CopyrightYear=" + copyrightYear
	ldflagsStr += " -X github.com/minio/minio/cmd.ReleaseTag=" + releaseTag
	ldflagsStr += " -X github.com/minio/minio/cmd.CommitID=" + commitID()
	ldflagsStr += " -X github.com/minio/minio/cmd.ShortCommitID=" + commitID()[:12]
	ldflagsStr += " -X github.com/minio/minio/cmd.GOPATH=" + os.Getenv("GOPATH")
@@ -40,7 +44,7 @@ func genLDFlags(version string) string {
}

// genReleaseTag prints release tag to the console for easy git tagging.
func releaseTag(version string) (string, time.Time) {
	relPrefix := "DEVELOPMENT"
	if prefix := os.Getenv("MINIO_RELEASE"); prefix != "" {
		relPrefix = prefix
@@ -53,14 +57,17 @@ func releaseTag(version string) string {

	relTag := strings.Replace(version, " ", "-", -1)
	relTag = strings.Replace(relTag, ":", "-", -1)
	t, err := time.Parse("2006-01-02T15-04-05Z", relTag)
	if err != nil {
		panic(err)
	}
	relTag = strings.Replace(relTag, ",", "", -1)
	relTag = relPrefix + "." + relTag

	if relSuffix != "" {
		relTag += "." + relSuffix
	}

	return relTag, t
}

// commitID returns the abbreviated commit-id hash of the last commit.
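For context, this is roughly how the generated flags are consumed when producing a release binary (a sketch; the exact Makefile invocation may differ):

```sh
go build -trimpath -ldflags "$(go run buildscripts/gen-ldflags.go)" -o minio
```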
93	buildscripts/heal-inconsistent-versions.sh	Executable file
@@ -0,0 +1,93 @@
#!/bin/bash -e

set -E
set -o pipefail
set -x

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )

if [ ! -x "$PWD/minio" ]; then
	echo "minio executable binary not found in current directory"
	exit 1
fi

function start_minio_4drive() {
	start_port=$1

	export MINIO_ROOT_USER=minio
	export MINIO_ROOT_PASSWORD=minio123
	export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
	unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
	export MINIO_CI_CD=1

	mkdir ${WORK_DIR}
	C_PWD=${PWD}
	if [ ! -x "$PWD/mc" ]; then
		MC_BUILD_DIR="mc-$RANDOM"
		if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
			echo "failed to download https://github.com/minio/mc"
			purge "${MC_BUILD_DIR}"
			exit 1
		fi

		(cd "${MC_BUILD_DIR}" && go build -o "$C_PWD/mc")

		# remove mc source.
		purge "${MC_BUILD_DIR}"
	fi

	"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/disk{1...4}" > "${WORK_DIR}/server1.log" 2>&1 &
	pid=$!
	disown $pid
	sleep 5

	if ! ps -p ${pid} 1>&2 >/dev/null; then
		echo "server1 log:"
		cat "${WORK_DIR}/server1.log"
		echo "FAILED"
		purge "$WORK_DIR"
		exit 1
	fi

	"${PWD}/mc" mb --with-versioning minio/bucket

	# alternate drive ownership between writes so some drives reject the
	# write, deliberately creating inconsistent versions across drives
	for i in $(seq 1 4); do
		"${PWD}/mc" cp /etc/hosts minio/bucket/testobj

		sudo chown -R root. "${WORK_DIR}/disk${i}"

		"${PWD}/mc" cp /etc/hosts minio/bucket/testobj

		sudo chown -R ${USER}. "${WORK_DIR}/disk${i}"
	done

	for vid in $("${PWD}/mc" ls --json --versions minio/bucket/testobj | jq -r .versionId); do
		"${PWD}/mc" cat --vid "${vid}" minio/bucket/testobj | md5sum
	done

	pkill minio
	sleep 3
}

function main() {
	start_port=$(shuf -i 10000-65000 -n 1)

	start_minio_4drive ${start_port}
}

function purge()
{
	rm -rf "$1"
}

( main "$@" )
rv=$?
purge "$WORK_DIR"
exit "$rv"
87	buildscripts/heal-manual.go	Normal file
@@ -0,0 +1,87 @@
//go:build ignore
// +build ignore

//
// MinIO Object Storage (c) 2022 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"os"
	"time"

	"github.com/minio/madmin-go/v2"
)

func main() {
	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are
	// dummy values, please replace them with original values.

	// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
	// New returns a MinIO Admin client object.
	madmClnt, err := madmin.New(os.Args[1], os.Args[2], os.Args[3], false)
	if err != nil {
		log.Fatalln(err)
	}

	opts := madmin.HealOpts{
		Recursive: true,                  // recursively heal all objects at 'prefix'
		Remove:    true,                  // remove content that has lost quorum and is not recoverable
		Recreate:  true,                  // rewrite all old non-inlined xl.meta to new xl.meta
		ScanMode:  madmin.HealNormalScan, // by default do not do 'deep' scanning
	}

	start, _, err := madmClnt.Heal(context.Background(), "healing-rewrite-bucket", "", opts, "", false, false)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println("Healstart sequence ===")
	enc := json.NewEncoder(os.Stdout)
	if err = enc.Encode(&start); err != nil {
		log.Fatalln(err)
	}

	fmt.Println()
	for {
		_, status, err := madmClnt.Heal(context.Background(), "healing-rewrite-bucket", "", opts, start.ClientToken, false, false)
		if err != nil {
			log.Fatalln(err)
		}
		if status.Summary == "finished" {
			fmt.Println("Healstatus on items ===")
			for _, item := range status.Items {
				if err = enc.Encode(&item); err != nil {
					log.Fatalln(err)
				}
			}
			break
		}
		if status.Summary == "stopped" {
			fmt.Println("Healstatus on items ===")
			fmt.Println("Heal failed with", status.FailureDetail)
			break
		}

		for _, item := range status.Items {
			if err = enc.Encode(&item); err != nil {
				log.Fatalln(err)
			}
		}

		time.Sleep(time.Second)
	}
}
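The scripts in this directory drive this helper against a local test deployment, for example:

```sh
go run ./buildscripts/heal-manual.go "127.0.0.1:9000" "minio" "minio123"
```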
@@ -66,7 +66,7 @@ __init__() {
	mc mb minio/minio-test/
	mc cp ./minio minio/minio-test/to-read/
	mc cp /etc/hosts minio/minio-test/to-read/hosts
	mc anonymous set download minio/minio-test

	verify_checksum_mc ./minio minio/minio-test/to-read/minio

@@ -2,6 +2,9 @@

set -e

export GORACE="history_size=7"
export MINIO_API_REQUESTS_MAX=10000

## TODO remove `dsync` from race detector once this is merged and released https://go-review.googlesource.com/c/go/+/333529/
for d in $(go list ./... | grep -v dsync); do
	CGO_ENABLED=1 go test -v -race --timeout 100m "$d"

72	buildscripts/resolve-right-versions.sh	Executable file
@@ -0,0 +1,72 @@
#!/bin/bash -e

set -E
set -o pipefail
set -x

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )

if [ ! -x "$PWD/minio" ]; then
	echo "minio executable binary not found in current directory"
	exit 1
fi

function start_minio_5drive() {
	start_port=$1

	export MINIO_ROOT_USER=minio
	export MINIO_ROOT_PASSWORD=minio123
	export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
	unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
	export MINIO_CI_CD=1

	mkdir -p "${WORK_DIR}" # the work dir must exist before 'go build -o' writes into it

	MC_BUILD_DIR="mc-$RANDOM"
	if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
		echo "failed to download https://github.com/minio/mc"
		purge "${MC_BUILD_DIR}"
		exit 1
	fi

	(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")

	# remove mc source.
	purge "${MC_BUILD_DIR}"

	"${WORK_DIR}/mc" cp --quiet -r "buildscripts/cicd-corpus/" "${WORK_DIR}/cicd-corpus/"

	"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/cicd-corpus/disk{1...5}" > "${WORK_DIR}/server1.log" 2>&1 &
	pid=$!
	disown $pid
	sleep 5

	if ! ps -p ${pid} 1>&2 >/dev/null; then
		echo "server1 log:"
		cat "${WORK_DIR}/server1.log"
		echo "FAILED"
		purge "$WORK_DIR"
		exit 1
	fi

	"${WORK_DIR}/mc" stat minio/bucket/testobj

	pkill minio
	sleep 3
}

function main() {
	start_port=$(shuf -i 10000-65000 -n 1)

	start_minio_5drive ${start_port}
}

function purge()
{
	rm -rf "$1"
}

( main "$@" )
rv=$?
purge "$WORK_DIR"
exit "$rv"
151	buildscripts/rewrite-old-new.sh	Executable file
@@ -0,0 +1,151 @@
#!/bin/bash -e

set -E
set -o pipefail
set -x

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO_OLD=( "$PWD/minio.RELEASE.2020-10-28T08-16-50Z" --config-dir "$MINIO_CONFIG_DIR" server )
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )

if [ ! -x "$PWD/minio" ]; then
	echo "minio executable binary not found in current directory"
	exit 1
fi

function download_old_release() {
	if [ ! -f minio.RELEASE.2020-10-28T08-16-50Z ]; then
		curl --silent -O https://dl.minio.io/server/minio/release/linux-amd64/archive/minio.RELEASE.2020-10-28T08-16-50Z
		chmod a+x minio.RELEASE.2020-10-28T08-16-50Z
	fi
}

function verify_rewrite() {
	start_port=$1

	export MINIO_ACCESS_KEY=minio
	export MINIO_SECRET_KEY=minio123
	export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
	unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
	export MINIO_CI_CD=1

	MC_BUILD_DIR="mc-$RANDOM"
	if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
		echo "failed to download https://github.com/minio/mc"
		purge "${MC_BUILD_DIR}"
		exit 1
	fi

	(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")

	# remove mc source.
	purge "${MC_BUILD_DIR}"

	"${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" > "${WORK_DIR}/server1.log" 2>&1 &
	pid=$!
	disown $pid
	sleep 10

	if ! ps -p ${pid} 1>&2 >/dev/null; then
		echo "server1 log:"
		cat "${WORK_DIR}/server1.log"
		echo "FAILED"
		purge "$WORK_DIR"
		exit 1
	fi

	"${WORK_DIR}/mc" mb minio/healing-rewrite-bucket --quiet --with-lock
	"${WORK_DIR}/mc" cp \
		buildscripts/verify-build.sh \
		minio/healing-rewrite-bucket/ \
		--disable-multipart --quiet

	"${WORK_DIR}/mc" cp \
		buildscripts/verify-build.sh \
		minio/healing-rewrite-bucket/ \
		--disable-multipart --quiet

	"${WORK_DIR}/mc" cp \
		buildscripts/verify-build.sh \
		minio/healing-rewrite-bucket/ \
		--disable-multipart --quiet

	kill ${pid}
	sleep 3

	"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" > "${WORK_DIR}/server1.log" 2>&1 &
	pid=$!
	disown $pid
	sleep 10

	if ! ps -p ${pid} 1>&2 >/dev/null; then
		echo "server1 log:"
		cat "${WORK_DIR}/server1.log"
		echo "FAILED"
		purge "$WORK_DIR"
		exit 1
	fi

	go build ./docs/debugging/s3-check-md5/
	if ! ./s3-check-md5 \
		-debug \
		-versions \
		-access-key minio \
		-secret-key minio123 \
		-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
		echo "server1 log:"
		cat "${WORK_DIR}/server1.log"
		echo "FAILED"
		mkdir -p inspects
		(cd inspects; "${WORK_DIR}/mc" admin inspect minio/healing-rewrite-bucket/verify-build.sh/**)

		"${WORK_DIR}/mc" mb play/inspects
		"${WORK_DIR}/mc" mirror inspects play/inspects

		purge "$WORK_DIR"
		exit 1
	fi

	go run ./buildscripts/heal-manual.go "127.0.0.1:${start_port}" "minio" "minio123"
	sleep 1

	if ! ./s3-check-md5 \
		-debug \
		-versions \
		-access-key minio \
		-secret-key minio123 \
		-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
		echo "server1 log:"
		cat "${WORK_DIR}/server1.log"
		echo "FAILED"
		mkdir -p inspects
		(cd inspects; "${WORK_DIR}/mc" admin inspect minio/healing-rewrite-bucket/verify-build.sh/**)

		"${WORK_DIR}/mc" mb play/inspects
		"${WORK_DIR}/mc" mirror inspects play/inspects

		purge "$WORK_DIR"
		exit 1
	fi

	kill ${pid}
}

function main() {
	download_old_release

	start_port=$(shuf -i 10000-65000 -n 1)

	verify_rewrite ${start_port}
}

function purge()
{
	rm -rf "$1"
}

( main "$@" )
rv=$?
purge "$WORK_DIR"
exit "$rv"
@@ -120,7 +120,7 @@ function start_minio_16drive() {
		cat "${WORK_DIR}/server1.log"
		echo "FAILED"
		mkdir -p inspects
		(cd inspects; "${WORK_DIR}/mc" support inspect minio/healing-shard-bucket/unaligned/**)

		"${WORK_DIR}/mc" mb play/inspects
		"${WORK_DIR}/mc" mirror inspects play/inspects
@@ -140,7 +140,7 @@ function start_minio_16drive() {
		cat "${WORK_DIR}/server1.log"
		echo "FAILED"
		mkdir -p inspects
		(cd inspects; "${WORK_DIR}/mc" support inspect minio/healing-shard-bucket/unaligned/**)

		"${WORK_DIR}/mc" mb play/inspects
		"${WORK_DIR}/mc" mirror inspects play/inspects

97	buildscripts/verify-healing-with-root-disks.sh	Executable file
@@ -0,0 +1,97 @@
#!/bin/bash -e

set -E
set -o pipefail
set -x

if [ ! -x "$PWD/minio" ]; then
	echo "minio executable binary not found in current directory"
	exit 1
fi

WORK_DIR="$(mktemp -d)"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )

function start_minio() {
	start_port=$1

	export MINIO_ROOT_USER=minio
	export MINIO_ROOT_PASSWORD=minio123
	unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
	unset MINIO_CI_CD
	unset CI

	args=()
	for i in $(seq 1 4); do
		args+=("http://localhost:$[${start_port}+$i]${WORK_DIR}/mnt/disk$i/ ")
	done

	for i in $(seq 1 4); do
		"${MINIO[@]}" --address ":$[$start_port+$i]" ${args[@]} 2>&1 >"${WORK_DIR}/server$i.log" &
	done

	# Wait until all nodes return 403
	for i in $(seq 1 4); do
		while [ "$(curl -m 1 -s -o /dev/null -w "%{http_code}" http://localhost:$[$start_port+$i])" -ne "403" ]; do
			echo -n "."
			sleep 1
		done
	done
}

# Prepare fake disks with losetup
function prepare_block_devices() {
	mkdir -p ${WORK_DIR}/disks/ ${WORK_DIR}/mnt/
	for i in 1 2 3 4; do
		dd if=/dev/zero of=${WORK_DIR}/disks/img.$i bs=1M count=2048
		mkfs.ext4 -F ${WORK_DIR}/disks/img.$i
		sudo mknod /dev/minio-loopdisk$i b 7 $[256-$i]
		sudo losetup /dev/minio-loopdisk$i ${WORK_DIR}/disks/img.$i
		mkdir -p ${WORK_DIR}/mnt/disk$i/
		sudo mount /dev/minio-loopdisk$i ${WORK_DIR}/mnt/disk$i/
		sudo chown "$(id -u):$(id -g)" /dev/minio-loopdisk$i ${WORK_DIR}/mnt/disk$i/
	done
}

# Start a distributed MinIO setup, unmount one disk and check if it is formatted
function main() {
	start_port=$(shuf -i 10000-65000 -n 1)
	start_minio ${start_port}

	# Unmount the disk, after the unmount the device id
	# /tmp/xxx/mnt/disk4 will be the same as '/' and it
	# will be detected as root disk
	while [ "$u" != "0" ]; do
		sudo umount ${WORK_DIR}/mnt/disk4/
		u=$?
		sleep 1
	done

	# Wait until MinIO self heal kicks in
	sleep 60

	if [ -f ${WORK_DIR}/mnt/disk4/.minio.sys/format.json ]; then
		echo "A root disk was formatted unexpectedly"
		cat "${WORK_DIR}/server4.log"
		exit -1
	fi
}

function cleanup() {
	pkill minio
	sudo umount ${WORK_DIR}/mnt/disk{1..3}/
	sudo rm /dev/minio-loopdisk*
	rm -rf "$WORK_DIR"
}

( prepare_block_devices )
( main "$@" )
rv=$?

cleanup
exit "$rv"
@@ -80,7 +80,7 @@ func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.
|
||||
}
|
||||
|
||||
// Before proceeding validate if bucket exists.
|
||||
_, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{})
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
@@ -142,7 +142,7 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
|
||||
}
|
||||
|
||||
// Before proceeding validate if bucket exists.
|
||||
_, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{})
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
|
||||
@@ -18,15 +18,30 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/minio/madmin-go"
|
||||
"github.com/klauspost/compress/zip"
|
||||
"github.com/minio/kes"
|
||||
"github.com/minio/madmin-go/v2"
|
||||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
"github.com/minio/minio/internal/bucket/lifecycle"
|
||||
objectlock "github.com/minio/minio/internal/bucket/object/lock"
|
||||
"github.com/minio/minio/internal/bucket/versioning"
|
||||
"github.com/minio/minio/internal/event"
|
||||
"github.com/minio/minio/internal/kms"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/bucket/policy"
|
||||
iampolicy "github.com/minio/pkg/iam/policy"
|
||||
)
|
||||
|
||||
@@ -47,19 +62,18 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
|
||||
if objectAPI == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := pathClean(vars["bucket"])
|
||||
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
data, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
@@ -76,15 +90,17 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
|
||||
return
|
||||
}
|
||||
|
||||
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, data)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
bucketMeta := madmin.SRBucketMeta{
|
||||
Type: madmin.SRBucketMetaTypeQuotaConfig,
|
||||
Bucket: bucket,
|
||||
Quota: data,
|
||||
UpdatedAt: updatedAt,
|
||||
}
|
||||
if quotaConfig.Quota == 0 {
|
||||
bucketMeta.Quota = nil
|
||||
@@ -108,19 +124,18 @@ func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
|
||||
if objectAPI == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := pathClean(vars["bucket"])
|
||||
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
config, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
@@ -145,20 +160,14 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
|
||||
bucket := pathClean(vars["bucket"])
|
||||
update := r.Form.Get("update") == "true"
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketTargetAction)
|
||||
if objectAPI == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -192,7 +201,18 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
|
||||
if update {
|
||||
ops = madmin.GetTargetUpdateOps(r.Form)
|
||||
} else {
|
||||
target.Arn = globalBucketTargetSys.getRemoteARN(bucket, &target)
|
||||
var exists bool // true if arn exists
|
||||
target.Arn, exists = globalBucketTargetSys.getRemoteARN(bucket, &target)
|
||||
if exists && target.Arn != "" { // return pre-existing ARN
|
||||
data, err := json.Marshal(target.Arn)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
// Write success response.
|
||||
writeSuccessResponseJSON(w, data)
|
||||
return
|
||||
}
|
||||
}
|
||||
if target.Arn == "" {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
|
||||
@@ -208,8 +228,10 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
|
||||
for _, op := range ops {
|
||||
switch op {
|
||||
case madmin.CredentialsUpdateType:
|
||||
if !globalSiteReplicationSys.isEnabled() {
|
||||
tgt.Credentials = target.Credentials
|
||||
tgt.TargetBucket = target.TargetBucket
|
||||
}
|
||||
tgt.Secure = target.Secure
|
||||
tgt.Endpoint = target.Endpoint
|
||||
case madmin.SyncUpdateType:
|
||||
@@ -234,7 +256,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
|
||||
}
|
||||
if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, update); err != nil {
|
||||
switch err.(type) {
|
||||
case RemoteTargetConnectionErr:
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationRemoteConnectionError, err), r.URL)
|
||||
default:
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
@@ -251,7 +273,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
|
||||
return
|
||||
}
|
||||
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -274,19 +296,15 @@ func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *htt
|
||||
vars := mux.Vars(r)
|
||||
bucket := pathClean(vars["bucket"])
|
||||
arnType := vars["type"]
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.GetBucketTargetAction)
|
||||
if objectAPI == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
if bucket != "" {
|
||||
// Check if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -314,19 +332,14 @@ func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *ht
|
||||
bucket := pathClean(vars["bucket"])
|
||||
arn := vars["arn"]
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketTargetAction)
|
||||
if objectAPI == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -345,7 +358,7 @@ func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *ht
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
|
||||
return
|
||||
}
|
||||
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -353,3 +366,774 @@ func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *ht
|
||||
// Write success response.
|
||||
writeSuccessNoContent(w)
|
||||
}
|
||||
|
||||
// ExportBucketMetadataHandler - exports all bucket metadata as a zipped file
|
||||
func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ExportBucketMetadata")
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
bucket := pathClean(r.Form.Get("bucket"))
|
||||
// Get current object layer instance.
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ExportBucketMetadataAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
buckets []BucketInfo
|
||||
err error
|
||||
)
|
||||
if bucket != "" {
|
||||
// Check if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
buckets = append(buckets, BucketInfo{Name: bucket})
|
||||
} else {
|
||||
buckets, err = objectAPI.ListBuckets(ctx, BucketOptions{})
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize a zip writer which will provide a zipped content
|
||||
// of bucket metadata
|
||||
zipWriter := zip.NewWriter(w)
|
||||
defer zipWriter.Close()
|
||||
rawDataFn := func(r io.Reader, filename string, sz int) error {
|
||||
header, zerr := zip.FileInfoHeader(dummyFileInfo{
|
||||
name: filename,
|
||||
size: int64(sz),
|
||||
mode: 0o600,
|
||||
modTime: time.Now(),
|
||||
isDir: false,
|
||||
sys: nil,
|
||||
})
|
||||
if zerr != nil {
|
||||
logger.LogIf(ctx, zerr)
|
||||
return nil
|
||||
}
|
||||
header.Method = zip.Deflate
|
||||
zwriter, zerr := zipWriter.CreateHeader(header)
|
||||
if zerr != nil {
|
||||
logger.LogIf(ctx, zerr)
|
||||
return nil
|
||||
}
|
||||
if _, err := io.Copy(zwriter, r); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
cfgFiles := []string{
|
||||
bucketPolicyConfig,
|
||||
bucketNotificationConfig,
|
||||
bucketLifecycleConfig,
|
||||
bucketSSEConfig,
|
||||
bucketTaggingConfig,
|
||||
bucketQuotaConfigFile,
|
||||
objectLockConfig,
|
||||
bucketVersioningConfig,
|
||||
bucketReplicationConfig,
|
||||
bucketTargetsFile,
|
||||
}
|
||||
for _, bi := range buckets {
|
||||
for _, cfgFile := range cfgFiles {
|
||||
cfgPath := pathJoin(bi.Name, cfgFile)
|
||||
bucket := bi.Name
|
||||
switch cfgFile {
|
||||
case bucketNotificationConfig:
|
||||
config, err := globalBucketMetadataSys.GetNotificationConfig(bucket)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
case bucketLifecycleConfig:
|
||||
config, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
|
||||
if err != nil {
|
||||
if errors.Is(err, BucketLifecycleNotFound{Bucket: bucket}) {
|
||||
continue
|
||||
}
|
||||
logger.LogIf(ctx, err)
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
case bucketQuotaConfigFile:
|
||||
config, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket)
|
||||
if err != nil {
|
||||
if errors.Is(err, BucketQuotaConfigNotFound{Bucket: bucket}) {
|
||||
continue
|
||||
}
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
configData, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
case bucketSSEConfig:
|
||||
config, _, err := globalBucketMetadataSys.GetSSEConfig(bucket)
|
||||
if err != nil {
|
||||
if errors.Is(err, BucketSSEConfigNotFound{Bucket: bucket}) {
|
||||
continue
|
||||
}
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
case bucketTaggingConfig:
|
||||
config, _, err := globalBucketMetadataSys.GetTaggingConfig(bucket)
|
||||
if err != nil {
|
||||
if errors.Is(err, BucketTaggingNotFound{Bucket: bucket}) {
|
||||
continue
|
||||
}
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
case objectLockConfig:
|
||||
config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)
|
||||
if err != nil {
|
||||
if errors.Is(err, BucketObjectLockConfigNotFound{Bucket: bucket}) {
|
||||
continue
|
||||
}
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
case bucketVersioningConfig:
|
||||
config, _, err := globalBucketMetadataSys.GetVersioningConfig(bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
// ignore empty versioning configs
|
||||
if config.Status != versioning.Enabled && config.Status != versioning.Suspended {
|
||||
continue
|
||||
}
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
case bucketReplicationConfig:
|
||||
config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
|
||||
if err != nil {
|
||||
if errors.Is(err, BucketReplicationConfigNotFound{Bucket: bucket}) {
|
||||
continue
|
||||
}
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
case bucketTargetsFile:
|
||||
config, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket)
|
||||
if err != nil {
|
||||
if errors.Is(err, BucketRemoteTargetNotFound{Bucket: bucket}) {
|
||||
continue
|
||||
}
|
||||
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type importMetaReport struct {
|
||||
madmin.BucketMetaImportErrs
|
||||
}
|
||||
|
||||
func (i *importMetaReport) SetStatus(bucket, fname string, err error) {
|
||||
st := i.Buckets[bucket]
|
||||
var errMsg string
|
||||
if err != nil {
|
||||
errMsg = err.Error()
|
||||
}
|
||||
switch fname {
|
||||
case bucketPolicyConfig:
|
||||
st.Policy = madmin.MetaStatus{IsSet: true, Err: errMsg}
|
||||
case bucketNotificationConfig:
|
||||
st.Notification = madmin.MetaStatus{IsSet: true, Err: errMsg}
|
||||
case bucketLifecycleConfig:
|
||||
st.Lifecycle = madmin.MetaStatus{IsSet: true, Err: errMsg}
|
||||
case bucketSSEConfig:
|
||||
st.SSEConfig = madmin.MetaStatus{IsSet: true, Err: errMsg}
|
||||
case bucketTaggingConfig:
|
||||
st.Tagging = madmin.MetaStatus{IsSet: true, Err: errMsg}
|
||||
case bucketQuotaConfigFile:
|
||||
st.Quota = madmin.MetaStatus{IsSet: true, Err: errMsg}
|
||||
case objectLockConfig:
|
||||
st.ObjectLock = madmin.MetaStatus{IsSet: true, Err: errMsg}
|
||||
case bucketVersioningConfig:
|
||||
st.Versioning = madmin.MetaStatus{IsSet: true, Err: errMsg}
|
||||
default:
|
||||
st.Err = errMsg
|
||||
}
|
||||
i.Buckets[bucket] = st
|
||||
}
|
||||
|
||||
// ImportBucketMetadataHandler - imports all bucket metadata from a zipped file and overwrite bucket metadata config
|
||||
// There are some caveats regarding the following:
|
||||
// 1. object lock config - object lock should have been specified at time of bucket creation. Only default retention settings are imported here.
|
||||
// 2. Replication config - is omitted from import as remote target credentials are not available from exported data for security reasons.
|
||||
// 3. lifecycle config - if transition rules are present, tier name needs to have been defined.
|
||||
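// A matching client-side flow is sketched below (the command names assume a
// current `mc` release and are not defined in this file):
//
//	mc admin cluster bucket export myminio/                       # writes cluster-metadata.zip
//	mc admin cluster bucket import myminio/ cluster-metadata.zip
//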
func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ImportBucketMetadata")
|
||||
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ImportBucketMetadataAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
data, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
reader := bytes.NewReader(data)
|
||||
zr, err := zip.NewReader(reader, int64(len(data)))
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
bucketMap := make(map[string]struct{}, 1)
|
||||
rpt := importMetaReport{
|
||||
madmin.BucketMetaImportErrs{
|
||||
Buckets: make(map[string]madmin.BucketStatus, len(zr.File)),
|
||||
},
|
||||
}
|
||||
// import object lock config if any - order of import matters here.
|
||||
for _, file := range zr.File {
|
||||
slc := strings.Split(file.Name, slashSeparator)
|
||||
if len(slc) != 2 { // expecting bucket/configfile in the zipfile
|
||||
rpt.SetStatus(file.Name, "", fmt.Errorf("malformed zip - expecting format bucket/<config.json>"))
|
||||
continue
|
||||
}
|
||||
bucket, fileName := slc[0], slc[1]
|
||||
switch fileName {
|
||||
case objectLockConfig:
|
||||
reader, err := file.Open()
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
config, err := objectlock.ParseObjectLockConfig(reader)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
|
||||
continue
|
||||
}
|
||||
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
if _, ok := bucketMap[bucket]; !ok {
|
||||
opts := MakeBucketOptions{
|
||||
LockEnabled: config.ObjectLockEnabled == "Enabled",
|
||||
}
|
||||
err = objectAPI.MakeBucket(ctx, bucket, opts)
|
||||
if err != nil {
|
||||
if _, ok := err.(BucketExists); !ok {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
bucketMap[bucket] = struct{}{}
|
||||
}
|
||||
|
||||
// Deny object locking configuration settings on existing buckets without object lock enabled.
|
||||
if _, _, err = globalBucketMetadataSys.GetObjectLockConfig(bucket); err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, configData)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
rpt.SetStatus(bucket, fileName, nil)
|
||||
|
||||
// Call site replication hook.
|
||||
//
|
||||
// We encode the xml bytes as base64 to ensure there are no encoding
|
||||
// errors.
|
||||
cfgStr := base64.StdEncoding.EncodeToString(configData)
|
||||
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
|
||||
Type: madmin.SRBucketMetaTypeObjectLockConfig,
|
||||
Bucket: bucket,
|
||||
ObjectLockConfig: &cfgStr,
|
||||
UpdatedAt: updatedAt,
|
||||
}); err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// import versioning metadata
|
||||
for _, file := range zr.File {
|
||||
slc := strings.Split(file.Name, slashSeparator)
|
||||
if len(slc) != 2 { // expecting bucket/configfile in the zipfile
|
||||
rpt.SetStatus(file.Name, "", fmt.Errorf("malformed zip - expecting format bucket/<config.json>"))
|
||||
continue
|
||||
}
|
||||
bucket, fileName := slc[0], slc[1]
|
||||
switch fileName {
|
||||
case bucketVersioningConfig:
|
||||
reader, err := file.Open()
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
v, err := versioning.ParseConfig(io.LimitReader(reader, maxBucketVersioningConfigSize))
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
if _, ok := bucketMap[bucket]; !ok {
|
||||
if err = objectAPI.MakeBucket(ctx, bucket, MakeBucketOptions{}); err != nil {
|
||||
if _, ok := err.(BucketExists); !ok {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
bucketMap[bucket] = struct{}{}
|
||||
}
|
||||
|
||||
if globalSiteReplicationSys.isEnabled() && v.Suspended() {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf("Cluster replication is enabled for this site, so the versioning state cannot be suspended."))
|
||||
continue
|
||||
}
|
||||
|
||||
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled && v.Suspended() {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf("An Object Lock configuration is present on this bucket, so the versioning state cannot be suspended."))
|
||||
continue
|
||||
}
|
||||
if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf("A replication configuration is present on this bucket, so the versioning state cannot be suspended."))
|
||||
continue
|
||||
}
|
||||
|
||||
configData, err := xml.Marshal(v)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketVersioningConfig, configData); err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
rpt.SetStatus(bucket, fileName, nil)
|
||||
}
|
||||
}
|
||||
|
||||
for _, file := range zr.File {
|
||||
reader, err := file.Open()
|
||||
if err != nil {
|
||||
rpt.SetStatus(file.Name, "", err)
|
||||
continue
|
||||
}
|
||||
sz := file.FileInfo().Size()
|
||||
slc := strings.Split(file.Name, slashSeparator)
|
||||
if len(slc) != 2 { // expecting bucket/configfile in the zipfile
|
||||
rpt.SetStatus(file.Name, "", fmt.Errorf("malformed zip - expecting format bucket/<config.json>"))
|
||||
continue
|
||||
}
|
||||
bucket, fileName := slc[0], slc[1]
|
||||
// create bucket if it does not exist yet.
|
||||
if _, ok := bucketMap[bucket]; !ok {
|
||||
err = objectAPI.MakeBucket(ctx, bucket, MakeBucketOptions{})
|
||||
if err != nil {
|
||||
if _, ok := err.(BucketExists); !ok {
|
||||
rpt.SetStatus(bucket, "", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
bucketMap[bucket] = struct{}{}
|
||||
}
|
||||
if _, ok := bucketMap[bucket]; !ok {
|
||||
continue
|
||||
}
|
||||
switch fileName {
|
||||
case bucketNotificationConfig:
|
||||
config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region, globalEventNotifier.targetList)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
|
||||
continue
|
||||
}
|
||||
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketNotificationConfig, configData); err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
rulesMap := config.ToRulesMap()
|
||||
globalEventNotifier.AddRulesMap(bucket, rulesMap)
|
||||
rpt.SetStatus(bucket, fileName, nil)
|
||||
case bucketPolicyConfig:
|
||||
// Error out if Content-Length is beyond allowed size.
|
||||
if sz > maxBucketPolicySize {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyTooLarge.String()))
|
||||
continue
|
||||
}
|
||||
|
||||
bucketPolicyBytes, err := io.ReadAll(io.LimitReader(reader, sz))
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
bucketPolicy, err := policy.ParseConfig(bytes.NewReader(bucketPolicyBytes), bucket)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Version in policy must not be empty
|
||||
if bucketPolicy.Version == "" {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyInvalidVersion.String()))
|
||||
continue
|
||||
}
|
||||
|
||||
configData, err := json.Marshal(bucketPolicy)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, configData)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
rpt.SetStatus(bucket, fileName, nil)
|
||||
// Call site replication hook.
|
||||
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
|
||||
Type: madmin.SRBucketMetaTypePolicy,
|
||||
Bucket: bucket,
|
||||
Policy: bucketPolicyBytes,
|
||||
UpdatedAt: updatedAt,
|
||||
}); err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
case bucketLifecycleConfig:
|
||||
bucketLifecycle, err := lifecycle.ParseLifecycleConfig(io.LimitReader(reader, sz))
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Validate the received bucket lifecycle document
|
||||
if err = bucketLifecycle.Validate(); err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Validate the transition storage ARNs
|
||||
if err = validateTransitionTier(bucketLifecycle); err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
configData, err := xml.Marshal(bucketLifecycle)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, configData); err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
rpt.SetStatus(bucket, fileName, nil)
|
||||
case bucketSSEConfig:
|
||||
// Parse bucket encryption xml
|
||||
encConfig, err := validateBucketSSEConfig(io.LimitReader(reader, maxBucketSSEConfigSize))
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
|
||||
continue
|
||||
}
|
||||
|
||||
// Return error if KMS is not initialized
|
||||
if GlobalKMS == nil {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s", errorCodes[ErrKMSNotConfigured].Description))
|
||||
continue
|
||||
}
|
||||
kmsKey := encConfig.KeyID()
|
||||
if kmsKey != "" {
|
||||
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
|
||||
_, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
|
||||
if err != nil {
|
||||
if errors.Is(err, kes.ErrKeyNotFound) {
|
||||
rpt.SetStatus(bucket, fileName, errKMSKeyNotFound)
|
||||
continue
|
||||
}
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
configData, err := xml.Marshal(encConfig)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Store the bucket encryption configuration in the object layer
|
||||
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, configData)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
rpt.SetStatus(bucket, fileName, nil)
|
||||
|
||||
// Call site replication hook.
|
||||
//
|
||||
// We encode the xml bytes as base64 to ensure there are no encoding
|
||||
// errors.
|
||||
cfgStr := base64.StdEncoding.EncodeToString(configData)
|
||||
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
|
||||
Type: madmin.SRBucketMetaTypeSSEConfig,
|
||||
Bucket: bucket,
|
||||
SSEConfig: &cfgStr,
|
||||
UpdatedAt: updatedAt,
|
||||
}); err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
case bucketTaggingConfig:
|
||||
tags, err := tags.ParseBucketXML(io.LimitReader(reader, sz))
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
|
||||
continue
|
||||
}
|
||||
|
||||
configData, err := xml.Marshal(tags)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, configData)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
rpt.SetStatus(bucket, fileName, nil)
|
||||
|
||||
// Call site replication hook.
|
||||
//
|
||||
// We encode the xml bytes as base64 to ensure there are no encoding
|
||||
// errors.
|
||||
cfgStr := base64.StdEncoding.EncodeToString(configData)
|
||||
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
|
||||
Type: madmin.SRBucketMetaTypeTags,
|
||||
Bucket: bucket,
|
||||
Tags: &cfgStr,
|
||||
UpdatedAt: updatedAt,
|
||||
}); err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
case bucketQuotaConfigFile:
|
||||
data, err := io.ReadAll(reader)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
quotaConfig, err := parseBucketQuota(bucket, data)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if quotaConfig.Type == "fifo" {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore"))
|
||||
continue
|
||||
}
|
||||
|
||||
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, data)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
rpt.SetStatus(bucket, fileName, nil)
|
||||
|
||||
bucketMeta := madmin.SRBucketMeta{
|
||||
Type: madmin.SRBucketMetaTypeQuotaConfig,
|
||||
Bucket: bucket,
|
||||
Quota: data,
|
||||
UpdatedAt: updatedAt,
|
||||
}
|
||||
if quotaConfig.Quota == 0 {
|
||||
bucketMeta.Quota = nil
|
||||
}
|
||||
|
||||
// Call site replication hook.
|
||||
if err = globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta); err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rptData, err := json.Marshal(rpt.BucketMetaImportErrs)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, rptData)
|
||||
}
|
||||
|
||||
// ReplicationDiffHandler - POST returns info on unreplicated versions for a remote target ARN
|
||||
// to the connected HTTP client.
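// Entries are streamed as newline-delimited JSON; while the diff channel is
// idle, a single whitespace byte is written every 500ms as a keep-alive and
// the response is flushed. (Descriptive note; inferred from the handler body
// below.)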
|
||||
func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ReplicationDiff")
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ReplicationDiff)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Check if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
opts := extractReplicateDiffOpts(r.Form)
|
||||
if opts.ARN != "" {
|
||||
tgt := globalBucketTargetSys.GetRemoteBucketTargetByArn(ctx, bucket, opts.ARN)
|
||||
if tgt.Empty() {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, fmt.Errorf("invalid arn : '%s'", opts.ARN)), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
|
||||
defer keepAliveTicker.Stop()
|
||||
|
||||
diffCh, err := getReplicationDiff(ctx, objectAPI, bucket, opts)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
enc := json.NewEncoder(w)
|
||||
for {
|
||||
select {
|
||||
case entry, ok := <-diffCh:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if err := enc.Encode(entry); err != nil {
|
||||
return
|
||||
}
|
||||
if len(diffCh) == 0 {
|
||||
// Flush if nothing is queued
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
case <-keepAliveTicker.C:
|
||||
if len(diffCh) > 0 {
|
||||
continue
|
||||
}
|
||||
if _, err := w.Write([]byte(" ")); err != nil {
|
||||
return
|
||||
}
|
||||
w.(http.Flusher).Flush()
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,15 +20,20 @@ package cmd
 import (
 	"context"
 	"errors"
 	"fmt"
 	"net/http"
 
 	"github.com/minio/kes"
-	"github.com/minio/madmin-go"
+	"github.com/minio/madmin-go/v2"
 	"github.com/minio/minio/internal/auth"
 	"github.com/minio/minio/internal/config"
 	iampolicy "github.com/minio/pkg/iam/policy"
 )
|
||||
|
||||
// validateAdminReq validates the request against the supplied admin actions
// and returns whether it is allowed.
// If any one of the supplied actions is allowed, the call succeeds.
// If a nil ObjectLayer is returned, the operation is not permitted and an
// error response has already been written to w.
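//
// Illustrative call site (a sketch based on handlers later in this diff, not
// part of the original change):
//
//	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
//	if objectAPI == nil {
//		return // an error response has already been written to w
//	}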
|
||||
func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, actions ...iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
@@ -40,11 +45,16 @@ func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Reques
 	for _, action := range actions {
 		// Validate request signature.
 		cred, adminAPIErr := checkAdminRequestAuth(ctx, r, action, "")
-		if adminAPIErr != ErrNone {
+		switch adminAPIErr {
+		case ErrNone:
+			return objectAPI, cred
+		case ErrAccessDenied:
+			// Try another
+			continue
+		default:
 			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
 			return nil, cred
 		}
-		return objectAPI, cred
 	}
 	writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
 	return nil, auth.Credentials{}
|
||||
@@ -74,7 +84,13 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
 			Description:    e.Error(),
 			HTTPStatusCode: http.StatusBadRequest,
 		}
-	case config.Error:
+	case config.ErrConfigNotFound:
+		apiErr = APIError{
+			Code:           "XMinioConfigNotFoundError",
+			Description:    e.Error(),
+			HTTPStatusCode: http.StatusNotFound,
+		}
+	case config.ErrConfigGeneric:
 		apiErr = APIError{
 			Code:           "XMinioConfigError",
 			Description:    e.Error(),
|
||||
@@ -96,6 +112,12 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
|
||||
}
|
||||
default:
|
||||
switch {
|
||||
case errors.Is(err, errTooManyPolicies):
|
||||
apiErr = APIError{
|
||||
Code: "XMinioAdminInvalidRequest",
|
||||
Description: err.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
case errors.Is(err, errDecommissionAlreadyRunning):
|
||||
apiErr = APIError{
|
||||
Code: "XMinioDecommissionNotAllowed",
|
||||
@@ -108,6 +130,18 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
|
||||
Description: err.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
case errors.Is(err, errDecommissionRebalanceAlreadyRunning):
|
||||
apiErr = APIError{
|
||||
Code: "XMinioDecommissionNotAllowed",
|
||||
Description: err.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
case errors.Is(err, errRebalanceDecommissionAlreadyRunning):
|
||||
apiErr = APIError{
|
||||
Code: "XMinioRebalanceNotAllowed",
|
||||
Description: err.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
case errors.Is(err, errConfigNotFound):
|
||||
apiErr = APIError{
|
||||
Code: "XMinioConfigError",
|
||||
@@ -176,24 +210,6 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
 				Description:    err.Error(),
 				HTTPStatusCode: http.StatusBadRequest,
 			}
-		case errors.Is(err, errTierBackendInUse):
-			apiErr = APIError{
-				Code:           "XMinioAdminTierBackendInUse",
-				Description:    err.Error(),
-				HTTPStatusCode: http.StatusConflict,
-			}
-		case errors.Is(err, errTierBackendNotEmpty):
-			apiErr = APIError{
-				Code:           "XMinioAdminTierBackendNotEmpty",
-				Description:    err.Error(),
-				HTTPStatusCode: http.StatusBadRequest,
-			}
-		case errors.Is(err, errTierInsufficientCreds):
-			apiErr = APIError{
-				Code:           "XMinioAdminTierInsufficientCreds",
-				Description:    err.Error(),
-				HTTPStatusCode: http.StatusBadRequest,
-			}
 		case errIsTierPermError(err):
 			apiErr = APIError{
 				Code:           "XMinioAdminTierInsufficientPermissions",
|
||||
@@ -217,3 +233,27 @@ func toAdminAPIErrCode(ctx context.Context, err error) APIErrorCode {
|
||||
return toAPIErrorCode(ctx, err)
|
||||
}
|
||||
}
|
||||
|
||||
// wraps export error for more context
|
||||
func exportError(ctx context.Context, err error, fname, entity string) APIError {
|
||||
if entity == "" {
|
||||
return toAPIError(ctx, fmt.Errorf("error exporting %s with: %w", fname, err))
|
||||
}
|
||||
return toAPIError(ctx, fmt.Errorf("error exporting %s from %s with: %w", entity, fname, err))
|
||||
}
|
||||
|
||||
// wraps import error for more context
|
||||
func importError(ctx context.Context, err error, fname, entity string) APIError {
|
||||
if entity == "" {
|
||||
return toAPIError(ctx, fmt.Errorf("error importing %s with: %w", fname, err))
|
||||
}
|
||||
return toAPIError(ctx, fmt.Errorf("error importing %s from %s with: %w", entity, fname, err))
|
||||
}
|
||||
|
||||
// wraps import error for more context
|
||||
func importErrorWithAPIErr(ctx context.Context, apiErr APIErrorCode, err error, fname, entity string) APIError {
|
||||
if entity == "" {
|
||||
return errorCodes.ToAPIErrWithErr(apiErr, fmt.Errorf("error importing %s with: %w", fname, err))
|
||||
}
|
||||
return errorCodes.ToAPIErrWithErr(apiErr, fmt.Errorf("error importing %s from %s with: %w", entity, fname, err))
|
||||
}
|
||||
|
||||
@@ -27,13 +27,14 @@ import (
 	"strings"
 
 	"github.com/gorilla/mux"
-	"github.com/minio/madmin-go"
+	"github.com/minio/madmin-go/v2"
 	"github.com/minio/minio/internal/config"
 	"github.com/minio/minio/internal/config/cache"
 	"github.com/minio/minio/internal/config/etcd"
 	xldap "github.com/minio/minio/internal/config/identity/ldap"
 	"github.com/minio/minio/internal/config/identity/openid"
-	"github.com/minio/minio/internal/config/policy/opa"
+	idplugin "github.com/minio/minio/internal/config/identity/plugin"
+	polplugin "github.com/minio/minio/internal/config/policy/plugin"
 	"github.com/minio/minio/internal/config/storageclass"
 	"github.com/minio/minio/internal/logger"
 	iampolicy "github.com/minio/pkg/iam/policy"
|
||||
@@ -70,7 +71,7 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
 		return
 	}
 
-	cfg, err := readServerConfig(ctx, objectAPI)
+	cfg, err := readServerConfig(ctx, objectAPI, nil)
 	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
|
||||
@@ -80,6 +81,7 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = validateConfig(cfg, subSys); err != nil {
|
||||
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
|
||||
return
|
||||
@@ -90,6 +92,12 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
|
||||
return
|
||||
}
|
||||
|
||||
// freshly retrieve the config so that default values are loaded for reset config
|
||||
if cfg, err = getValidConfig(objectAPI); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
dynamic := config.SubSystemsDynamic.Contains(subSys)
|
||||
if dynamic {
|
||||
applyDynamic(ctx, objectAPI, cfg, subSys, r, w)
|
||||
@@ -97,13 +105,14 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
|
||||
}
|
||||
|
||||
 func applyDynamic(ctx context.Context, objectAPI ObjectLayer, cfg config.Config, subSys string,
-	r *http.Request, w http.ResponseWriter) {
+	r *http.Request, w http.ResponseWriter,
+) {
|
||||
// Apply dynamic values.
|
||||
if err := applyDynamicConfigForSubSys(GlobalContext, objectAPI, cfg, subSys); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
globalNotificationSys.SignalService(serviceReloadDynamic)
|
||||
globalNotificationSys.SignalConfigReload(subSys)
|
||||
// Tell the client that dynamic config was applied.
|
||||
w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
|
||||
}
|
||||
@@ -133,7 +142,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
 		return
 	}
 
-	cfg, err := readServerConfig(ctx, objectAPI)
+	cfg, err := readServerConfig(ctx, objectAPI, nil)
 	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
|
||||
@@ -171,10 +180,16 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
|
||||
if dynamic {
|
||||
applyDynamic(ctx, objectAPI, cfg, subSys, r, w)
|
||||
}
|
||||
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
}
|
||||
|
||||
// GetConfigKVHandler - GET /minio/admin/v3/get-config-kv?key={key}
|
||||
//
|
||||
// `key` can be one of three forms:
|
||||
// 1. `subsys:target` -> request for config of a single subsystem and target pair.
|
||||
// 2. `subsys:` -> request for config of a single subsystem and the default target.
|
||||
// 3. `subsys` -> request for config of all targets for the given subsystem.
|
||||
func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetConfigKV")
|
||||
|
||||
@@ -187,15 +202,34 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
 	cfg := globalServerConfig.Clone()
 	vars := mux.Vars(r)
-	buf := &bytes.Buffer{}
-	cw := config.NewConfigWriteTo(cfg, vars["key"])
-	if _, err := cw.WriteTo(buf); err != nil {
+	key := vars["key"]
+
+	var subSys, target string
+	{
+		ws := strings.SplitN(key, madmin.SubSystemSeparator, 2)
+		subSys = ws[0]
+		if len(ws) == 2 {
+			if ws[1] == "" {
+				target = madmin.Default
+			} else {
+				target = ws[1]
+			}
+		}
+	}
+
+	subSysConfigs, err := cfg.GetSubsysInfo(subSys, target)
+	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
 
+	var s strings.Builder
+	for _, subSysConfig := range subSysConfigs {
+		subSysConfig.WriteTo(&s, false)
+	}
+
 	password := cred.SecretKey
-	econfigData, err := madmin.EncryptData(password, buf.Bytes())
+	econfigData, err := madmin.EncryptData(password, []byte(s.String()))
 	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
|
||||
@@ -262,7 +296,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
 		return
 	}
 
-	cfg, err := readServerConfig(ctx, objectAPI)
+	cfg, err := readServerConfig(ctx, objectAPI, nil)
 	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
|
||||
@@ -420,45 +454,31 @@ func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Reques
 
 	var s strings.Builder
 	hkvs := config.HelpSubSysMap[""]
-	var count int
-	for _, hkv := range hkvs {
-		count += len(cfg[hkv.Key])
-	}
 	for _, hkv := range hkvs {
-		v := cfg[hkv.Key]
-		for target, kv := range v {
-			off := kv.Get(config.Enable) == config.EnableOff
+		// We ignore the error below, as we cannot get one.
+		cfgSubsysItems, _ := cfg.GetSubsysInfo(hkv.Key, "")
+
+		for _, item := range cfgSubsysItems {
+			off := item.Config.Get(config.Enable) == config.EnableOff
 			switch hkv.Key {
 			case config.EtcdSubSys:
-				off = !etcd.Enabled(kv)
+				off = !etcd.Enabled(item.Config)
 			case config.CacheSubSys:
-				off = !cache.Enabled(kv)
+				off = !cache.Enabled(item.Config)
 			case config.StorageClassSubSys:
-				off = !storageclass.Enabled(kv)
-			case config.PolicyOPASubSys:
-				off = !opa.Enabled(kv)
+				off = !storageclass.Enabled(item.Config)
+			case config.PolicyPluginSubSys:
+				off = !polplugin.Enabled(item.Config)
 			case config.IdentityOpenIDSubSys:
-				off = !openid.Enabled(kv)
+				off = !openid.Enabled(item.Config)
 			case config.IdentityLDAPSubSys:
-				off = !xldap.Enabled(kv)
+				off = !xldap.Enabled(item.Config)
 			case config.IdentityTLSSubSys:
 				off = !globalSTSTLSConfig.Enabled
+			case config.IdentityPluginSubSys:
+				off = !idplugin.Enabled(item.Config)
 			}
-			if off {
-				s.WriteString(config.KvComment)
-				s.WriteString(config.KvSpaceSeparator)
-			}
-			s.WriteString(hkv.Key)
-			if target != config.Default {
-				s.WriteString(config.SubSystemSeparator)
-				s.WriteString(target)
-			}
-			s.WriteString(config.KvSpaceSeparator)
-			s.WriteString(kv.String())
-			count--
-			if count > 0 {
-				s.WriteString(config.KvNewline)
-			}
+			item.WriteTo(&s, off)
 		}
 	}
|
||||
|
||||
|
||||
cmd/admin-handlers-idp-config.go (new file, 438 lines)
@@ -0,0 +1,438 @@
|
||||
// Copyright (c) 2015-2022 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/madmin-go/v2"
|
||||
"github.com/minio/minio-go/v7/pkg/set"
|
||||
"github.com/minio/minio/internal/config"
|
||||
cfgldap "github.com/minio/minio/internal/config/identity/ldap"
|
||||
"github.com/minio/minio/internal/config/identity/openid"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
iampolicy "github.com/minio/pkg/iam/policy"
|
||||
"github.com/minio/pkg/ldap"
|
||||
)
|
||||
|
||||
func (a adminAPIHandlers) addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, isUpdate bool) {
|
||||
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
|
||||
// More than maxConfigSize bytes were available
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure body content type is opaque to ensure that request body has not
|
||||
// been interpreted as form data.
|
||||
contentType := r.Header.Get("Content-Type")
|
||||
if contentType != "application/octet-stream" {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
password := cred.SecretKey
|
||||
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err, logger.Application)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
idpCfgType := mux.Vars(r)["type"]
|
||||
if !madmin.ValidIDPConfigTypes.Contains(idpCfgType) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigInvalidIDPType), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
var subSys string
|
||||
switch idpCfgType {
|
||||
case madmin.OpenidIDPCfg:
|
||||
subSys = madmin.IdentityOpenIDSubSys
|
||||
case madmin.LDAPIDPCfg:
|
||||
subSys = madmin.IdentityLDAPSubSys
|
||||
}
|
||||
|
||||
cfgName := mux.Vars(r)["name"]
|
||||
cfgTarget := madmin.Default
|
||||
if cfgName != "" {
|
||||
cfgTarget = cfgName
|
||||
if idpCfgType == madmin.LDAPIDPCfg && cfgName != madmin.Default {
|
||||
// LDAP does not support multiple configurations. So cfgName must be
|
||||
// empty or `madmin.Default`.
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Check that this is a valid Create vs Update API call.
|
||||
s := globalServerConfig.Clone()
|
||||
if apiErrCode := handleCreateUpdateValidation(s, subSys, cfgTarget, isUpdate); apiErrCode != ErrNone {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
cfgData := ""
|
||||
{
|
||||
tgtSuffix := ""
|
||||
if cfgTarget != madmin.Default {
|
||||
tgtSuffix = config.SubSystemSeparator + cfgTarget
|
||||
}
|
||||
cfgData = subSys + tgtSuffix + config.KvSpaceSeparator + string(reqBytes)
|
||||
}
|
||||
|
||||
cfg, err := readServerConfig(ctx, objectAPI, nil)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
dynamic, err := cfg.ReadConfig(strings.NewReader(cfgData))
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
	// IDP config is not dynamic. Sanity check.
	if dynamic {
		// `err` is nil on this path, so report a static description rather than err.Error().
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), "IDP configuration is not expected to be dynamic", r.URL)
		return
	}
|
||||
|
||||
if err = validateConfig(cfg, subSys); err != nil {
|
||||
|
||||
var validationErr ldap.Validation
|
||||
if errors.As(err, &validationErr) {
|
||||
// If we got an LDAP validation error, we need to send appropriate
|
||||
// error message back to client (likely mc).
|
||||
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigLDAPValidation),
|
||||
validationErr.FormatError(), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Update the actual server config on disk.
|
||||
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Write to the config input KV to history.
|
||||
if err = saveServerConfigHistory(ctx, objectAPI, []byte(cfgData)); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
}
|
||||
|
||||
func handleCreateUpdateValidation(s config.Config, subSys, cfgTarget string, isUpdate bool) APIErrorCode {
|
||||
if cfgTarget != madmin.Default {
|
||||
// This cannot give an error at this point.
|
||||
subSysTargets, _ := s.GetAvailableTargets(subSys)
|
||||
subSysTargetsSet := set.CreateStringSet(subSysTargets...)
|
||||
if isUpdate && !subSysTargetsSet.Contains(cfgTarget) {
|
||||
return ErrAdminConfigIDPCfgNameDoesNotExist
|
||||
}
|
||||
if !isUpdate && subSysTargetsSet.Contains(cfgTarget) {
|
||||
return ErrAdminConfigIDPCfgNameAlreadyExists
|
||||
}
|
||||
|
||||
return ErrNone
|
||||
}
|
||||
|
||||
// For the default configuration name, since it will always be an available
|
||||
// target, we need to check if a configuration value has been set previously
|
||||
// to figure out if this is a valid create or update API call.
|
||||
|
||||
// This cannot really error (FIXME: improve the type for GetConfigInfo)
|
||||
var cfgInfos []madmin.IDPCfgInfo
|
||||
switch subSys {
|
||||
case madmin.IdentityOpenIDSubSys:
|
||||
cfgInfos, _ = globalOpenIDConfig.GetConfigInfo(s, cfgTarget)
|
||||
case madmin.IdentityLDAPSubSys:
|
||||
cfgInfos, _ = globalLDAPConfig.GetConfigInfo(s, cfgTarget)
|
||||
}
|
||||
|
||||
if len(cfgInfos) > 0 && !isUpdate {
|
||||
return ErrAdminConfigIDPCfgNameAlreadyExists
|
||||
}
|
||||
if len(cfgInfos) == 0 && isUpdate {
|
||||
return ErrAdminConfigIDPCfgNameDoesNotExist
|
||||
}
|
||||
return ErrNone
|
||||
}
|
||||
|
||||
// AddIdentityProviderCfg: adds a new IDP config for openid/ldap.
|
||||
//
|
||||
// PUT <admin-prefix>/idp-cfg/openid/dex1 -> create named config `dex1`
|
||||
//
|
||||
// PUT <admin-prefix>/idp-cfg/openid/_ -> create (default) named config `_`
|
||||
func (a adminAPIHandlers) AddIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "AddIdentityProviderCfg")
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
a.addOrUpdateIDPHandler(ctx, w, r, false)
|
||||
}
|
||||
|
||||
// UpdateIdentityProviderCfg: updates an existing IDP config for openid/ldap.
|
||||
//
|
||||
// PATCH <admin-prefix>/idp-cfg/openid/dex1 -> update named config `dex1`
|
||||
//
|
||||
// PATCH <admin-prefix>/idp-cfg/openid/_ -> update (default) named config `_`
|
||||
func (a adminAPIHandlers) UpdateIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "UpdateIdentityProviderCfg")
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
a.addOrUpdateIDPHandler(ctx, w, r, true)
|
||||
}
|
||||
|
||||
// ListIdentityProviderCfg:
|
||||
//
|
||||
// GET <admin-prefix>/idp-cfg/openid -> lists openid provider configs.
|
||||
func (a adminAPIHandlers) ListIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListIdentityProviderCfg")
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
password := cred.SecretKey
|
||||
|
||||
idpCfgType := mux.Vars(r)["type"]
|
||||
if !madmin.ValidIDPConfigTypes.Contains(idpCfgType) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigInvalidIDPType), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
var cfgList []madmin.IDPListItem
|
||||
var err error
|
||||
switch idpCfgType {
|
||||
case madmin.OpenidIDPCfg:
|
||||
cfg := globalServerConfig.Clone()
|
||||
cfgList, err = globalOpenIDConfig.GetConfigList(cfg)
|
||||
case madmin.LDAPIDPCfg:
|
||||
cfg := globalServerConfig.Clone()
|
||||
cfgList, err = globalLDAPConfig.GetConfigList(cfg)
|
||||
|
||||
default:
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
data, err := json.Marshal(cfgList)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
econfigData, err := madmin.EncryptData(password, data)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, econfigData)
|
||||
}
|
||||
|
||||
// GetIdentityProviderCfg:
|
||||
//
|
||||
// GET <admin-prefix>/idp-cfg/openid/dex_test
|
||||
func (a adminAPIHandlers) GetIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetIdentityProviderCfg")
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
idpCfgType := mux.Vars(r)["type"]
|
||||
cfgName := mux.Vars(r)["name"]
|
||||
password := cred.SecretKey
|
||||
|
||||
if !madmin.ValidIDPConfigTypes.Contains(idpCfgType) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigInvalidIDPType), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
cfg := globalServerConfig.Clone()
|
||||
var cfgInfos []madmin.IDPCfgInfo
|
||||
var err error
|
||||
switch idpCfgType {
|
||||
case madmin.OpenidIDPCfg:
|
||||
cfgInfos, err = globalOpenIDConfig.GetConfigInfo(cfg, cfgName)
|
||||
case madmin.LDAPIDPCfg:
|
||||
cfgInfos, err = globalLDAPConfig.GetConfigInfo(cfg, cfgName)
|
||||
}
|
||||
if err != nil {
|
||||
if errors.Is(err, openid.ErrProviderConfigNotFound) || errors.Is(err, cfgldap.ErrProviderConfigNotFound) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
res := madmin.IDPConfig{
|
||||
Type: idpCfgType,
|
||||
Name: cfgName,
|
||||
Info: cfgInfos,
|
||||
}
|
||||
data, err := json.Marshal(res)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
econfigData, err := madmin.EncryptData(password, data)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, econfigData)
|
||||
}
|
||||
|
||||
// DeleteIdentityProviderCfg:
|
||||
//
|
||||
// DELETE <admin-prefix>/idp-cfg/openid/dex_test
|
||||
func (a adminAPIHandlers) DeleteIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "DeleteIdentityProviderCfg")
|
||||
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
idpCfgType := mux.Vars(r)["type"]
|
||||
cfgName := mux.Vars(r)["name"]
|
||||
if !madmin.ValidIDPConfigTypes.Contains(idpCfgType) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigInvalidIDPType), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
cfgCopy := globalServerConfig.Clone()
|
||||
var subSys string
|
||||
switch idpCfgType {
|
||||
case madmin.OpenidIDPCfg:
|
||||
subSys = config.IdentityOpenIDSubSys
|
||||
cfgInfos, err := globalOpenIDConfig.GetConfigInfo(cfgCopy, cfgName)
|
||||
if err != nil {
|
||||
if errors.Is(err, openid.ErrProviderConfigNotFound) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
hasEnv := false
|
||||
for _, ci := range cfgInfos {
|
||||
if ci.IsCfg && ci.IsEnv {
|
||||
hasEnv = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if hasEnv {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigEnvOverridden), r.URL)
|
||||
return
|
||||
}
|
||||
case madmin.LDAPIDPCfg:
|
||||
subSys = config.IdentityLDAPSubSys
|
||||
cfgInfos, err := globalLDAPConfig.GetConfigInfo(cfgCopy, cfgName)
|
||||
if err != nil {
|
||||
if errors.Is(err, openid.ErrProviderConfigNotFound) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
hasEnv := false
|
||||
for _, ci := range cfgInfos {
|
||||
if ci.IsCfg && ci.IsEnv {
|
||||
hasEnv = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if hasEnv {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigEnvOverridden), r.URL)
|
||||
return
|
||||
}
|
||||
default:
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
cfg, err := readServerConfig(ctx, objectAPI, nil)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
cfgKey := fmt.Sprintf("%s:%s", subSys, cfgName)
|
||||
if cfgName == madmin.Default {
|
||||
cfgKey = subSys
|
||||
}
|
||||
if err = cfg.DelKVS(cfgKey); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if err = validateConfig(cfg, subSys); err != nil {
|
||||
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
|
||||
return
|
||||
}
|
||||
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
dynamic := config.SubSystemsDynamic.Contains(subSys)
|
||||
if dynamic {
|
||||
applyDynamic(ctx, objectAPI, cfg, subSys, r, w)
|
||||
}
|
||||
}
|
||||
cmd/admin-handlers-idp-ldap.go (new file, 181 lines)
@@ -0,0 +1,181 @@
|
||||
// Copyright (c) 2015-2022 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/madmin-go/v2"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
iampolicy "github.com/minio/pkg/iam/policy"
|
||||
)
|
||||
|
||||
// ListLDAPPolicyMappingEntities lists users/groups mapped to given/all policies.
|
||||
//
|
||||
// GET <admin-prefix>/idp/ldap/policy-entities?[query-params]
|
||||
//
|
||||
// Query params:
|
||||
//
|
||||
// user=... -> repeatable query parameter, specifying users to query for
|
||||
// policy mapping
|
||||
//
|
||||
// group=... -> repeatable query parameter, specifying groups to query for
|
||||
// policy mapping
|
||||
//
|
||||
// policy=... -> repeatable query parameter, specifying policy to query for
|
||||
// user/group mapping
|
||||
//
|
||||
// When all query parameters are omitted, returns mappings for all policies.
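//
// Illustrative request (hypothetical values, not part of the original change):
//
//	GET <admin-prefix>/idp/ldap/policy-entities?policy=readwrite&user=uid=alice,dc=example,dc=org
//
// would return the users/groups mapped to `readwrite` and the policies
// attached to the given user DN.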
|
||||
func (a adminAPIHandlers) ListLDAPPolicyMappingEntities(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListLDAPPolicyMappingEntities")
|
||||
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
// Check authorization.
|
||||
|
||||
objectAPI, cred := validateAdminReq(ctx, w, r,
|
||||
iampolicy.ListGroupsAdminAction, iampolicy.ListUsersAdminAction, iampolicy.ListUserPoliciesAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Validate API arguments.
|
||||
|
||||
q := madmin.PolicyEntitiesQuery{
|
||||
Users: r.Form["user"],
|
||||
Groups: r.Form["group"],
|
||||
Policy: r.Form["policy"],
|
||||
}
|
||||
|
||||
// Query IAM
|
||||
|
||||
res, err := globalIAMSys.QueryLDAPPolicyEntities(r.Context(), q)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Encode result and send response.
|
||||
|
||||
data, err := json.Marshal(res)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
password := cred.SecretKey
|
||||
econfigData, err := madmin.EncryptData(password, data)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
writeSuccessResponseJSON(w, econfigData)
|
||||
}
|
||||
|
||||
// AttachDetachPolicyLDAP attaches or detaches policies from an LDAP entity
|
||||
// (user or group).
|
||||
//
|
||||
// POST <admin-prefix>/idp/ldap/policy/{operation}
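//
// {operation} must be either `attach` or `detach` (validated below), e.g.
// POST <admin-prefix>/idp/ldap/policy/attach with an encrypted
// madmin.PolicyAssociationReq body (illustrative example).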
|
||||
func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "AttachDetachPolicyLDAP")
|
||||
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
// Check authorization.
|
||||
|
||||
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.UpdatePolicyAssociationAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
|
||||
// More than maxConfigSize bytes were available
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure body content type is opaque to ensure that request body has not
|
||||
// been interpreted as form data.
|
||||
contentType := r.Header.Get("Content-Type")
|
||||
if contentType != "application/octet-stream" {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Validate operation
|
||||
operation := mux.Vars(r)["operation"]
|
||||
if operation != "attach" && operation != "detach" {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
isAttach := operation == "attach"
|
||||
|
||||
// Validate API arguments in body.
|
||||
password := cred.SecretKey
|
||||
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err, logger.Application)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
var par madmin.PolicyAssociationReq
|
||||
err = json.Unmarshal(reqBytes, &par)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err := par.IsValid(); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Call IAM subsystem
|
||||
updatedAt, addedOrRemoved, err := globalIAMSys.PolicyDBUpdateLDAP(ctx, isAttach, par)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
respBody := madmin.PolicyAssociationResp{
|
||||
UpdatedAt: updatedAt,
|
||||
}
|
||||
if isAttach {
|
||||
respBody.PoliciesAttached = addedOrRemoved
|
||||
} else {
|
||||
respBody.PoliciesDetached = addedOrRemoved
|
||||
}
|
||||
|
||||
data, err := json.Marshal(respBody)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
encryptedData, err := madmin.EncryptData(password, data)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, encryptedData)
|
||||
}
|
||||
@@ -19,6 +19,8 @@ package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
@@ -26,6 +28,11 @@ import (
|
||||
iampolicy "github.com/minio/pkg/iam/policy"
|
||||
)
|
||||
|
||||
var (
|
||||
errRebalanceDecommissionAlreadyRunning = errors.New("Rebalance cannot be started, decommission is already in progress")
|
||||
errDecommissionRebalanceAlreadyRunning = errors.New("Decommission cannot be started, rebalance is already in progress")
|
||||
)
|
||||
|
||||
func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "StartDecommission")
|
||||
|
||||
@@ -48,6 +55,11 @@ func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Reque
|
||||
return
|
||||
}
|
||||
|
||||
if pools.IsRebalanceStarted() {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errDecommissionRebalanceAlreadyRunning), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
v := vars["pool"]
|
||||
|
||||
@@ -58,6 +70,16 @@ func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Reque
|
||||
return
|
||||
}
|
||||
|
||||
if ep := globalEndpoints[idx].Endpoints[0]; !ep.IsLocal {
|
||||
for nodeIdx, proxyEp := range globalProxyEndpoints {
|
||||
if proxyEp.Endpoint.Host == ep.Host {
|
||||
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := pools.Decommission(r.Context(), idx); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
@@ -96,6 +118,16 @@ func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Requ
|
||||
return
|
||||
}
|
||||
|
||||
if ep := globalEndpoints[idx].Endpoints[0]; !ep.IsLocal {
|
||||
for nodeIdx, proxyEp := range globalProxyEndpoints {
|
||||
if proxyEp.Endpoint.Host == ep.Host {
|
||||
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := pools.DecommissionCancel(ctx, idx); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
@@ -129,8 +161,10 @@ func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
 
 	idx := globalEndpoints.GetPoolIdx(v)
 	if idx == -1 {
+		apiErr := toAdminAPIErr(ctx, errInvalidArgument)
+		apiErr.Description = fmt.Sprintf("specified pool '%s' not found, please specify a valid pool", v)
 		// We didn't find any matching pools, invalid input
-		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
+		writeErrorResponseJSON(ctx, w, apiErr, r.URL)
 		return
 	}
|
||||
|
||||
@@ -177,3 +211,137 @@ func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
logger.LogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus))
|
||||
}
|
||||
|
||||
func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "RebalanceStart")
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.RebalanceAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// NB rebalance-start admin API is always coordinated from first pool's
|
||||
// first node. The following is required to serialize (the effects of)
|
||||
// concurrent rebalance-start commands.
|
||||
if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
|
||||
for nodeIdx, proxyEp := range globalProxyEndpoints {
|
||||
if proxyEp.Endpoint.Host == ep.Host {
|
||||
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pools, ok := objectAPI.(*erasureServerPools)
|
||||
if !ok || len(pools.serverPools) == 1 {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if pools.IsDecommissionRunning() {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errRebalanceDecommissionAlreadyRunning), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if pools.IsRebalanceStarted() {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceAlreadyStarted), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
bucketInfos, err := objectAPI.ListBuckets(ctx, BucketOptions{})
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
buckets := make([]string, 0, len(bucketInfos))
|
||||
for _, bInfo := range bucketInfos {
|
||||
buckets = append(buckets, bInfo.Name)
|
||||
}
|
||||
|
||||
var id string
|
||||
if id, err = pools.initRebalanceMeta(ctx, buckets); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Rebalance routine is run on the first node of any pool participating in rebalance.
|
||||
pools.StartRebalance()
|
||||
|
||||
b, err := json.Marshal(struct {
|
||||
ID string `json:"id"`
|
||||
}{ID: id})
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, b)
|
||||
// Notify peers to load rebalance.bin and start rebalance routine if they happen to be
|
||||
// participating pool's leader node
|
||||
globalNotificationSys.LoadRebalanceMeta(ctx, true)
|
||||
}
|
||||
|
||||
func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "RebalanceStatus")
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.RebalanceAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Proxy rebalance-status to first pool first node, so that users see a
|
||||
// consistent view of rebalance progress even though different rebalancing
|
||||
// pools may temporarily have out of date info on the others.
|
||||
if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
|
||||
for nodeIdx, proxyEp := range globalProxyEndpoints {
|
||||
if proxyEp.Endpoint.Host == ep.Host {
|
||||
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pools, ok := objectAPI.(*erasureServerPools)
|
||||
if !ok {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
rs, err := rebalanceStatus(ctx, pools)
|
||||
if err != nil {
|
||||
if errors.Is(err, errRebalanceNotStarted) || errors.Is(err, errConfigNotFound) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceNotStarted), r.URL)
|
||||
return
|
||||
}
|
||||
logger.LogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err))
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
logger.LogIf(r.Context(), json.NewEncoder(w).Encode(rs))
|
||||
}
|
||||
|
||||
func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "RebalanceStop")
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.RebalanceAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
pools, ok := objectAPI.(*erasureServerPools)
|
||||
if !ok {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Cancel any ongoing rebalance operation
|
||||
globalNotificationSys.StopRebalance(r.Context())
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
logger.LogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
|
||||
}
|
||||
|
||||
@@ -22,11 +22,12 @@ import (
 	"context"
 	"encoding/json"
 	"io"
-	"io/ioutil"
 	"net/http"
+	"strings"
+	"time"
 
 	"github.com/gorilla/mux"
-	"github.com/minio/madmin-go"
+	"github.com/minio/madmin-go/v2"
 
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/pkg/bucket/policy"
|
||||
@@ -113,20 +114,27 @@ func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request
 	default:
 		err = errSRInvalidRequest(errInvalidArgument)
 	case madmin.MakeWithVersioningBktOp:
-		_, isLockEnabled := r.Form["lockEnabled"]
-		_, isVersioningEnabled := r.Form["versioningEnabled"]
-		opts := BucketOptions{
-			Location:          r.Form.Get("location"),
-			LockEnabled:       isLockEnabled,
-			VersioningEnabled: isVersioningEnabled,
+		createdAt, cerr := time.Parse(time.RFC3339Nano, strings.TrimSpace(r.Form.Get("createdAt")))
+		if cerr != nil {
+			createdAt = timeSentinel
+		}
+
+		opts := MakeBucketOptions{
+			LockEnabled:       r.Form.Get("lockEnabled") == "true",
+			VersioningEnabled: r.Form.Get("versioningEnabled") == "true",
+			ForceCreate:       r.Form.Get("forceCreate") == "true",
+			CreatedAt:         createdAt,
 		}
 		err = globalSiteReplicationSys.PeerBucketMakeWithVersioningHandler(ctx, bucket, opts)
 	case madmin.ConfigureReplBktOp:
 		err = globalSiteReplicationSys.PeerBucketConfigureReplHandler(ctx, bucket)
-	case madmin.DeleteBucketBktOp:
-		err = globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, false)
-	case madmin.ForceDeleteBucketBktOp:
-		err = globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, true)
+	case madmin.DeleteBucketBktOp, madmin.ForceDeleteBucketBktOp:
+		err = globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, DeleteBucketOptions{
+			Force:      operation == madmin.ForceDeleteBucketBktOp,
+			SRDeleteOp: getSRBucketDeleteOp(true),
+		})
+	case madmin.PurgeDeletedBucketOp:
+		globalSiteReplicationSys.purgeDeletedBucket(ctx, objectAPI, bucket)
 	}
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
@@ -158,7 +166,7 @@ func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http.
 		err = errSRInvalidRequest(errInvalidArgument)
 	case madmin.SRIAMItemPolicy:
 		if item.Policy == nil {
-			err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, nil)
+			err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, nil, item.UpdatedAt)
 		} else {
 			policy, perr := iampolicy.ParseConfig(bytes.NewReader(item.Policy))
 			if perr != nil {
|
||||
@@ -166,21 +174,21 @@ func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http.
 				return
 			}
 			if policy.IsEmpty() {
-				err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, nil)
+				err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, nil, item.UpdatedAt)
 			} else {
-				err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, policy)
+				err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, policy, item.UpdatedAt)
 			}
 		}
 	case madmin.SRIAMItemSvcAcc:
-		err = globalSiteReplicationSys.PeerSvcAccChangeHandler(ctx, item.SvcAccChange)
+		err = globalSiteReplicationSys.PeerSvcAccChangeHandler(ctx, item.SvcAccChange, item.UpdatedAt)
 	case madmin.SRIAMItemPolicyMapping:
-		err = globalSiteReplicationSys.PeerPolicyMappingHandler(ctx, item.PolicyMapping)
+		err = globalSiteReplicationSys.PeerPolicyMappingHandler(ctx, item.PolicyMapping, item.UpdatedAt)
 	case madmin.SRIAMItemSTSAcc:
-		err = globalSiteReplicationSys.PeerSTSAccHandler(ctx, item.STSCredential)
+		err = globalSiteReplicationSys.PeerSTSAccHandler(ctx, item.STSCredential, item.UpdatedAt)
 	case madmin.SRIAMItemIAMUser:
-		err = globalSiteReplicationSys.PeerIAMUserChangeHandler(ctx, item.IAMUser)
+		err = globalSiteReplicationSys.PeerIAMUserChangeHandler(ctx, item.IAMUser, item.UpdatedAt)
 	case madmin.SRIAMItemGroupInfo:
-		err = globalSiteReplicationSys.PeerGroupInfoChangeHandler(ctx, item.GroupInfo)
+		err = globalSiteReplicationSys.PeerGroupInfoChangeHandler(ctx, item.GroupInfo, item.UpdatedAt)
 	}
 	if err != nil {
 		logger.LogIf(ctx, err)
|
||||
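Every peer IAM handler now receives the item's `UpdatedAt`. Presumably this lets the receiving site discard replication events older than the state it already applied; a hedged sketch of such a last-writer-wins check (the map and method below are illustrative only, not MinIO's actual bookkeeping):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// lastApplied tracks the newest UpdatedAt seen per IAM item key.
// Illustrative only - MinIO persists this state differently.
type lastApplied struct {
	mu   sync.Mutex
	seen map[string]time.Time
}

// shouldApply reports whether an incoming change is newer than the
// one already applied, and records it when it is.
func (l *lastApplied) shouldApply(key string, updatedAt time.Time) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	if prev, ok := l.seen[key]; ok && !updatedAt.After(prev) {
		return false // stale or duplicate replication event
	}
	l.seen[key] = updatedAt
	return true
}

func main() {
	l := &lastApplied{seen: map[string]time.Time{}}
	now := time.Now()
	fmt.Println(l.shouldApply("policy/readonly", now))                 // true
	fmt.Println(l.shouldApply("policy/readonly", now.Add(-time.Hour))) // false
}
```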
@@ -212,7 +220,7 @@ func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *ht
err = errSRInvalidRequest(errInvalidArgument)
case madmin.SRBucketMetaTypePolicy:
if item.Policy == nil {
err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, nil)
err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, nil, item.UpdatedAt)
} else {
bktPolicy, berr := policy.ParseConfig(bytes.NewReader(item.Policy), item.Bucket)
if berr != nil {
@@ -220,31 +228,33 @@ func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *ht
return
}
if bktPolicy.IsEmpty() {
err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, nil)
err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, nil, item.UpdatedAt)
} else {
err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, bktPolicy)
err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, bktPolicy, item.UpdatedAt)
}
}
case madmin.SRBucketMetaTypeQuotaConfig:
if item.Quota == nil {
err = globalSiteReplicationSys.PeerBucketQuotaConfigHandler(ctx, item.Bucket, nil)
err = globalSiteReplicationSys.PeerBucketQuotaConfigHandler(ctx, item.Bucket, nil, item.UpdatedAt)
} else {
quotaConfig, err := parseBucketQuota(item.Bucket, item.Quota)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = globalSiteReplicationSys.PeerBucketQuotaConfigHandler(ctx, item.Bucket, quotaConfig); err != nil {
if err = globalSiteReplicationSys.PeerBucketQuotaConfigHandler(ctx, item.Bucket, quotaConfig, item.UpdatedAt); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
case madmin.SRBucketMetaTypeVersionConfig:
err = globalSiteReplicationSys.PeerBucketVersioningHandler(ctx, item.Bucket, item.Versioning, item.UpdatedAt)
case madmin.SRBucketMetaTypeTags:
err = globalSiteReplicationSys.PeerBucketTaggingHandler(ctx, item.Bucket, item.Tags)
err = globalSiteReplicationSys.PeerBucketTaggingHandler(ctx, item.Bucket, item.Tags, item.UpdatedAt)
case madmin.SRBucketMetaTypeObjectLockConfig:
err = globalSiteReplicationSys.PeerBucketObjectLockConfigHandler(ctx, item.Bucket, item.ObjectLockConfig)
err = globalSiteReplicationSys.PeerBucketObjectLockConfigHandler(ctx, item.Bucket, item.ObjectLockConfig, item.UpdatedAt)
case madmin.SRBucketMetaTypeSSEConfig:
err = globalSiteReplicationSys.PeerBucketSSEConfigHandler(ctx, item.Bucket, item.SSEConfig)
err = globalSiteReplicationSys.PeerBucketSSEConfigHandler(ctx, item.Bucket, item.SSEConfig, item.UpdatedAt)
}
if err != nil {
logger.LogIf(ctx, err)
@@ -294,7 +304,7 @@ func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Re
}

func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) error {
data, err := ioutil.ReadAll(body)
data, err := io.ReadAll(body)
if err != nil {
return SRError{
Cause: err,
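`ioutil.ReadAll` has been deprecated since Go 1.16; `io.ReadAll` is its drop-in replacement, so the migration seen throughout these files is mechanical:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// io.ReadAll is identical in behavior to the old ioutil.ReadAll.
	data, err := io.ReadAll(strings.NewReader(`{"name":"demo"}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}
```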
@@ -432,6 +442,7 @@ func getSRStatusOptions(r *http.Request) (opts madmin.SRStatusOptions) {
opts.Users = q.Get("users") == "true"
opts.Entity = madmin.GetSREntityType(q.Get("entity"))
opts.EntityValue = q.Get("entityvalue")
opts.ShowDeleted = q.Get("showDeleted") == "true"
return
}
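The status options are decoded straight off the query string, with booleans reduced to an `== "true"` comparison. The shape in isolation (field names mirror the hunk above; the struct itself is a simplified stand-in for `madmin.SRStatusOptions`):

```go
package main

import (
	"fmt"
	"net/url"
)

type statusOptions struct {
	Users       bool
	Entity      string
	EntityValue string
	ShowDeleted bool
}

func parseStatusOptions(q url.Values) (opts statusOptions) {
	opts.Users = q.Get("users") == "true"
	opts.Entity = q.Get("entity")
	opts.EntityValue = q.Get("entityvalue")
	opts.ShowDeleted = q.Get("showDeleted") == "true" // the new flag
	return
}

func main() {
	q, _ := url.ParseQuery("users=true&entity=bucket&entityvalue=photos&showDeleted=true")
	fmt.Printf("%+v\n", parseStatusOptions(q))
}
```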
@@ -490,3 +501,45 @@ func (a adminAPIHandlers) SRPeerRemove(w http.ResponseWriter, r *http.Request) {
return
}
}

// SiteReplicationResyncOp - PUT /minio/admin/v3/site-replication/resync/op
func (a adminAPIHandlers) SiteReplicationResyncOp(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SiteReplicationResyncOp")

defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationResyncAction)
if objectAPI == nil {
return
}

var peerSite madmin.PeerInfo
if err := parseJSONBody(ctx, r.Body, &peerSite, ""); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
vars := mux.Vars(r)
op := madmin.SiteResyncOp(vars["operation"])
var (
status madmin.SRResyncOpStatus
err error
)
switch op {
case madmin.SiteResyncStart:
status, err = globalSiteReplicationSys.startResync(ctx, objectAPI, peerSite)
case madmin.SiteResyncCancel:
status, err = globalSiteReplicationSys.cancelResync(ctx, objectAPI, peerSite)
default:
err = errSRInvalidRequest(errInvalidArgument)
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
body, err := json.Marshal(status)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, body)
}
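The new resync handler follows the usual admin-API shape: decode the JSON body, dispatch on a path variable, then marshal the status back out. A framework-free sketch of that shape (the route, operation names, and `resyncStatus` type are placeholders, not MinIO's):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

type resyncStatus struct {
	Status string `json:"status"`
}

func resyncOpHandler(w http.ResponseWriter, r *http.Request) {
	// The operation arrives as the last path segment ("start" or "cancel").
	op := r.URL.Path[strings.LastIndex(r.URL.Path, "/")+1:]
	var status resyncStatus
	switch op {
	case "start":
		status = resyncStatus{Status: "resync started"}
	case "cancel":
		status = resyncStatus{Status: "resync canceled"}
	default:
		http.Error(w, "invalid operation", http.StatusBadRequest)
		return
	}
	body, err := json.Marshal(status)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(body)
}

func main() {
	rec := httptest.NewRecorder()
	resyncOpHandler(rec, httptest.NewRequest(http.MethodPut, "/resync/start", nil))
	fmt.Println(rec.Body.String())
}
```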
@@ -26,11 +26,12 @@ package cmd
import (
"context"
"fmt"
"runtime"
"sync"
"testing"
"time"

"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v2"
minio "github.com/minio/minio-go/v7"
)

@@ -41,11 +42,15 @@ func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) {
}

func TestIAMInternalIDPConcurrencyServerSuite(t *testing.T) {
if runtime.GOOS == globalWindowsOSName {
t.Skip("windows is clunky")
}

baseTestCases := []TestSuiteCommon{
// Init and run test on FS backend with signature v4.
{serverType: "FS", signer: signerV4},
// Init and run test on FS backend, with tls enabled.
{serverType: "FS", signer: signerV4, secure: true},
// Init and run test on ErasureSD backend with signature v4.
{serverType: "ErasureSD", signer: signerV4},
// Init and run test on ErasureSD backend, with tls enabled.
{serverType: "ErasureSD", signer: signerV4, secure: true},
// Init and run test on Erasure backend.
{serverType: "Erasure", signer: signerV4},
// Init and run test on ErasureSet backend.
@@ -73,7 +78,7 @@ func TestIAMInternalIDPConcurrencyServerSuite(t *testing.T) {
}

func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second)
defer cancel()

bucket := getRandomBucketName()

File diff suppressed because it is too large
@@ -24,16 +24,17 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"testing"
"time"

"github.com/minio/madmin-go"
minio "github.com/minio/minio-go/v7"
"github.com/minio/madmin-go/v2"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
cr "github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/s3utils"
@@ -50,6 +51,8 @@ const (
type TestSuiteIAM struct {
TestSuiteCommon

ServerTypeDescription string

// Flag to turn on tests for etcd backend IAM
withEtcdBackend bool

@@ -59,7 +62,15 @@ type TestSuiteIAM struct {
}

func newTestSuiteIAM(c TestSuiteCommon, withEtcdBackend bool) *TestSuiteIAM {
return &TestSuiteIAM{TestSuiteCommon: c, withEtcdBackend: withEtcdBackend}
etcdStr := ""
if withEtcdBackend {
etcdStr = " (with etcd backend)"
}
return &TestSuiteIAM{
TestSuiteCommon: c,
ServerTypeDescription: fmt.Sprintf("%s%s", c.serverType, etcdStr),
withEtcdBackend: withEtcdBackend,
}
}

func (s *TestSuiteIAM) iamSetup(c *check) {
@@ -87,6 +98,29 @@ func (s *TestSuiteIAM) iamSetup(c *check) {
}
}

// List of all IAM test suites (i.e. test server configuration combinations)
// common to tests.
var iamTestSuites = func() []*TestSuiteIAM {
baseTestCases := []TestSuiteCommon{
// Init and run test on ErasureSD backend with signature v4.
{serverType: "ErasureSD", signer: signerV4},
// Init and run test on ErasureSD backend, with tls enabled.
{serverType: "ErasureSD", signer: signerV4, secure: true},
// Init and run test on Erasure backend.
{serverType: "Erasure", signer: signerV4},
// Init and run test on ErasureSet backend.
{serverType: "ErasureSet", signer: signerV4},
}
testCases := []*TestSuiteIAM{}
for _, bt := range baseTestCases {
testCases = append(testCases,
newTestSuiteIAM(bt, false),
newTestSuiteIAM(bt, true),
)
}
return testCases
}()
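Hoisting the suite list into the package-level `iamTestSuites` lets every IAM test share one backend matrix and name its sub-tests from `ServerTypeDescription`. The sub-test pattern, reduced to essentials (runnable with `go test`; the `suite` type is a stand-in for `TestSuiteIAM`):

```go
package iammatrix

import (
	"fmt"
	"testing"
)

type suite struct{ description string }

// One shared matrix, built once at package level, as the diff above does.
var suites = []suite{
	{"Erasure"},
	{"Erasure (with etcd backend)"},
}

func TestMatrix(t *testing.T) {
	for i, tc := range suites {
		tc := tc // capture the range variable for the closure
		t.Run(fmt.Sprintf("Test: %d, ServerType: %s", i+1, tc.description), func(t *testing.T) {
			// per-suite setup, assertions, and teardown go here
			t.Log("running against", tc.description)
		})
	}
}
```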
const (
EnvTestEtcdBackend = "ETCD_SERVER"
)
@@ -156,30 +190,12 @@ func (s *TestSuiteIAM) getUserClient(c *check, accessKey, secretKey, sessionToke
}

func TestIAMInternalIDPServerSuite(t *testing.T) {
baseTestCases := []TestSuiteCommon{
// Init and run test on FS backend with signature v4.
{serverType: "FS", signer: signerV4},
// Init and run test on FS backend, with tls enabled.
{serverType: "FS", signer: signerV4, secure: true},
// Init and run test on Erasure backend.
{serverType: "Erasure", signer: signerV4},
// Init and run test on ErasureSet backend.
{serverType: "ErasureSet", signer: signerV4},
if runtime.GOOS == globalWindowsOSName {
t.Skip("windows is clunky; disable these tests")
}
testCases := []*TestSuiteIAM{}
for _, bt := range baseTestCases {
testCases = append(testCases,
newTestSuiteIAM(bt, false),
newTestSuiteIAM(bt, true),
)
}
for i, testCase := range testCases {
etcdStr := ""
if testCase.withEtcdBackend {
etcdStr = " (with etcd backend)"
}
for i, testCase := range iamTestSuites {
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s%s", i+1, testCase.serverType, etcdStr),
fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription),
func(t *testing.T) {
suite := testCase
c := &check{t, testCase.serverType}
@@ -226,6 +242,7 @@ func (s *TestSuiteIAM) TestUserCreate(c *check) {
if err != nil {
c.Fatalf("unable to set policy: %v", err)
}

client := s.getUserClient(c, accessKey, secretKey, "")
err = client.MakeBucket(ctx, getRandomBucketName(), minio.MakeBucketOptions{})
if err != nil {
@@ -371,7 +388,7 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
req.ContentLength = int64(len(buf))
sum := sha256.Sum256(buf)
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum[:]))
req.Body = ioutil.NopCloser(bytes.NewReader(buf))
req.Body = io.NopCloser(bytes.NewReader(buf))
req = signer.SignV4(*req, accessKey, secretKey, "", "")

// 3.1 Execute the request.
@@ -890,6 +907,9 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {

// 5. Check that service account can be deleted.
c.assertSvcAccDeletion(ctx, s, userAdmClient, accessKey, bucket)

// 6. Check that service account cannot be created for some other user.
c.mustNotCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient)
}

func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
|
||||
c.assertSvcAccDeletion(ctx, s, s.adm, accessKey, bucket)
|
||||
}
|
||||
|
||||
func (s *TestSuiteIAM) SetUpAccMgmtPlugin(c *check) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
pluginEndpoint := os.Getenv("POLICY_PLUGIN_ENDPOINT")
|
||||
if pluginEndpoint == "" {
|
||||
c.Skip("POLICY_PLUGIN_ENDPOINT not given - skipping.")
|
||||
}
|
||||
|
||||
configCmds := []string{
|
||||
"policy_plugin",
|
||||
"url=" + pluginEndpoint,
|
||||
}
|
||||
|
||||
_, err := s.adm.SetConfigKV(ctx, strings.Join(configCmds, " "))
|
||||
if err != nil {
|
||||
c.Fatalf("unable to setup access management plugin for tests: %v", err)
|
||||
}
|
||||
|
||||
s.RestartIAMSuite(c)
|
||||
}
|
||||
|
||||
// TestIAM_AMPInternalIDPServerSuite - tests for access management plugin
|
||||
func TestIAM_AMPInternalIDPServerSuite(t *testing.T) {
|
||||
for i, testCase := range iamTestSuites {
|
||||
t.Run(
|
||||
fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription),
|
||||
func(t *testing.T) {
|
||||
suite := testCase
|
||||
c := &check{t, testCase.serverType}
|
||||
|
||||
suite.SetUpSuite(c)
|
||||
defer suite.TearDownSuite(c)
|
||||
|
||||
suite.SetUpAccMgmtPlugin(c)
|
||||
|
||||
suite.TestAccMgmtPlugin(c)
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAccMgmtPlugin - this test assumes that the access-management-plugin is
|
||||
// the same as the example in `docs/iam/access-manager-plugin.go` -
|
||||
// specifically, it denies only `s3:Put*` operations on non-root accounts.
|
||||
func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()

// 0. Check that owner is able to make-bucket.
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}

// 1. Create a user.
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}

// 2. Check new user appears in listing
usersMap, err := s.adm.ListUsers(ctx)
if err != nil {
c.Fatalf("error listing: %v", err)
}
v, ok := usersMap[accessKey]
if !ok {
c.Fatalf("user not listed: %s", accessKey)
}
c.Assert(v.Status, madmin.AccountEnabled)

// 3. Check that user is able to make a bucket.
client := s.getUserClient(c, accessKey, secretKey, "")
err = client.MakeBucket(ctx, getRandomBucketName(), minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("user could not create bucket: %v", err)
}

// 3.1 check user has access to bucket
c.mustListObjects(ctx, client, bucket)

// 3.2 check that user cannot upload an object.
_, err = client.PutObject(ctx, bucket, "objectName", bytes.NewBuffer([]byte("some content")), 12, minio.PutObjectOptions{})
if err == nil {
c.Fatalf("user was able to upload unexpectedly")
}

// Create a madmin client with user creds
userAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: cr.NewStaticV4(accessKey, secretKey, ""),
Secure: s.secure,
})
if err != nil {
c.Fatalf("Err creating user admin client: %v", err)
}
userAdmClient.SetCustomTransport(s.TestSuiteCommon.client.Transport)

// Create svc acc
cr := c.mustCreateSvcAccount(ctx, accessKey, userAdmClient)

// 1. Check that svc account appears in listing
c.assertSvcAccAppearsInListing(ctx, userAdmClient, accessKey, cr.AccessKey)

// 2. Check that svc account info can be queried
c.assertSvcAccInfoQueryable(ctx, userAdmClient, accessKey, cr.AccessKey, false)

// 3. Check S3 access
c.assertSvcAccS3Access(ctx, s, cr, bucket)

// Check that session policies do not apply - as policy enforcement is
// delegated to plugin.
{
svcAK, svcSK := mustGenerateCredentials(c)

// This policy does not allow listing objects.
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
cr, err := userAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
Policy: policyBytes,
TargetUser: accessKey,
AccessKey: svcAK,
SecretKey: svcSK,
})
if err != nil {
c.Fatalf("Unable to create svc acc: %v", err)
}
svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
// Though the attached policy does not allow listing, it will be
// ignored because the plugin allows it.
c.mustListObjects(ctx, svcClient, bucket)
}

// 4. Check that service account's secret key and account status can be
// updated.
c.assertSvcAccSecretKeyAndStatusUpdate(ctx, s, userAdmClient, accessKey, bucket)

// 5. Check that service account can be deleted.
c.assertSvcAccDeletion(ctx, s, userAdmClient, accessKey, bucket)

// 6. Check that service account **can** be created for some other user.
// This is possible because of the policy enforced in the plugin.
c.mustCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient)
}
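For context, the access-manager plugin exercised above is an external HTTP service consulted on every request. The sketch below captures the deny-`s3:Put*`-for-non-root idea; the request and response shapes are simplified from memory rather than copied from `docs/iam/access-manager-plugin.go`, so treat the field names and the root-account check as assumptions:

```go
package main

import (
	"encoding/json"
	"net/http"
	"strings"
)

// input loosely mirrors the authorization payload the plugin receives;
// the exact schema lives in docs/iam/access-manager-plugin.go.
type input struct {
	Account string `json:"account"`
	Action  string `json:"action"`
}

func authorize(w http.ResponseWriter, r *http.Request) {
	var req struct {
		Input input `json:"input"`
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Deny s3:Put* for everyone except the (assumed) root account name.
	allow := req.Input.Account == "minio" || !strings.HasPrefix(req.Input.Action, "s3:Put")
	json.NewEncoder(w).Encode(map[string]bool{"result": allow})
}

func main() {
	http.HandleFunc("/", authorize)
	http.ListenAndServe(":8080", nil)
}
```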
func (c *check) mustCreateIAMUser(ctx context.Context, admClnt *madmin.AdminClient) madmin.Credentials {
c.Helper()
randUser := mustGetUUID()
randPass := mustGetUUID()
err := admClnt.AddUser(ctx, randUser, randPass)
@@ -974,6 +1155,7 @@ func (c *check) mustCreateIAMUser(ctx context.Context, admClnt *madmin.AdminClie
}

func (c *check) mustGetIAMUserInfo(ctx context.Context, admClnt *madmin.AdminClient, accessKey string) madmin.UserInfo {
c.Helper()
ui, err := admClnt.GetUserInfo(ctx, accessKey)
if err != nil {
c.Fatalf("should be able to get user info: %v", err)
@@ -982,6 +1164,7 @@ func (c *check) mustGetIAMUserInfo(ctx context.Context, admClnt *madmin.AdminCli
}

func (c *check) mustNotCreateIAMUser(ctx context.Context, admClnt *madmin.AdminClient) {
c.Helper()
randUser := mustGetUUID()
randPass := mustGetUUID()
err := admClnt.AddUser(ctx, randUser, randPass)
@@ -991,6 +1174,7 @@ func (c *check) mustNotCreateIAMUser(ctx context.Context, admClnt *madmin.AdminC
}

func (c *check) mustCreateSvcAccount(ctx context.Context, tgtUser string, admClnt *madmin.AdminClient) madmin.Credentials {
c.Helper()
cr, err := admClnt.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
TargetUser: tgtUser,
})
@@ -1001,6 +1185,7 @@ func (c *check) mustCreateSvcAccount(ctx context.Context, tgtUser string, admCln
}

func (c *check) mustNotCreateSvcAccount(ctx context.Context, tgtUser string, admClnt *madmin.AdminClient) {
c.Helper()
_, err := admClnt.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
TargetUser: tgtUser,
})
@@ -1010,28 +1195,90 @@ func (c *check) mustNotCreateSvcAccount(ctx context.Context, tgtUser string, adm
}

func (c *check) mustNotListObjects(ctx context.Context, client *minio.Client, bucket string) {
c.Helper()
res := client.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
v, ok := <-res
if !ok || v.Err == nil {
c.Fatalf("user was able to list unexpectedly!")
c.Fatalf("user was able to list unexpectedly! on %s", bucket)
}
}

func (c *check) mustPutObjectWithTags(ctx context.Context, client *minio.Client, bucket, object string) {
c.Helper()
_, err := client.PutObject(ctx, bucket, object, bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{
UserTags: map[string]string{
"security": "public",
"virus": "true",
},
})
if err != nil {
c.Fatalf("user was unable to upload the object: %v", err)
}
}

func (c *check) mustGetObject(ctx context.Context, client *minio.Client, bucket, object string) {
c.Helper()

r, err := client.GetObject(ctx, bucket, object, minio.GetObjectOptions{})
if err != nil {
c.Fatalf("user was unable to download the object: %v", err)
}
defer r.Close()

_, err = io.Copy(io.Discard, r)
if err != nil {
c.Fatalf("user was unable to download the object: %v", err)
}
}

func (c *check) mustHeadObject(ctx context.Context, client *minio.Client, bucket, object string, tagCount int) {
c.Helper()

oinfo, err := client.StatObject(ctx, bucket, object, minio.StatObjectOptions{})
if err != nil {
c.Fatalf("user was unable to download the object: %v", err)
}

if oinfo.UserTagCount != tagCount {
c.Fatalf("expected tagCount: %d, got %d", tagCount, oinfo.UserTagCount)
}
}

func (c *check) mustListObjects(ctx context.Context, client *minio.Client, bucket string) {
c.Helper()
res := client.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
v, ok := <-res
if ok && v.Err != nil {
msg := fmt.Sprintf("user was unable to list: %v", v.Err)
c.Fatalf(msg)
c.Fatalf("user was unable to list: %v", v.Err)
}
}

func (c *check) mustListBuckets(ctx context.Context, client *minio.Client) {
c.Helper()
_, err := client.ListBuckets(ctx)
if err != nil {
c.Fatalf("user was unable to list buckets: %v", err)
}
}

func (c *check) mustNotUpload(ctx context.Context, client *minio.Client, bucket string) {
c.Helper()
_, err := client.PutObject(ctx, bucket, "some-object", bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{})
if e, ok := err.(minio.ErrorResponse); ok {
if e.Code == "AccessDenied" {
return
}
}
c.Fatalf("upload did not get an AccessDenied error - got %#v instead", err)
}

func (c *check) assertSvcAccS3Access(ctx context.Context, s *TestSuiteIAM, cr madmin.Credentials, bucket string) {
svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
c.mustListObjects(ctx, svcClient, bucket)
}

func (c *check) assertSvcAccAppearsInListing(ctx context.Context, madmClient *madmin.AdminClient, parentAK, svcAK string) {
c.Helper()
listResp, err := madmClient.ListServiceAccounts(ctx, parentAK)
if err != nil {
c.Fatalf("unable to list svc accounts: %v", err)
@@ -1057,6 +1304,7 @@ func (c *check) assertSvcAccInfoQueryable(ctx context.Context, madmClient *madmi
// bucket. It creates a session policy that restricts listing on the bucket and
// then enables it again in a session policy update call.
func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuiteIAM, madmClient *madmin.AdminClient, accessKey, bucket string) {
c.Helper()
svcAK, svcSK := mustGenerateCredentials(c)

// This policy does not allow listing objects.
@@ -1112,6 +1360,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
}

func (c *check) assertSvcAccSecretKeyAndStatusUpdate(ctx context.Context, s *TestSuiteIAM, madmClient *madmin.AdminClient, accessKey, bucket string) {
c.Helper()
svcAK, svcSK := mustGenerateCredentials(c)
cr, err := madmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
TargetUser: accessKey,
@@ -1148,6 +1397,7 @@ func (c *check) assertSvcAccSecretKeyAndStatusUpdate(ctx context.Context, s *Tes
}

func (c *check) assertSvcAccDeletion(ctx context.Context, s *TestSuiteIAM, madmClient *madmin.AdminClient, accessKey, bucket string) {
c.Helper()
svcAK, svcSK := mustGenerateCredentials(c)
cr, err := madmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
TargetUser: accessKey,
@@ -1168,6 +1418,7 @@ func (c *check) assertSvcAccDeletion(ctx context.Context, s *TestSuiteIAM, madmC
}

func mustGenerateCredentials(c *check) (string, string) {
c.Helper()
ak, sk, err := auth.GenerateCredentials()
if err != nil {
c.Fatalf("unable to generate credentials: %v", err)

File diff suppressed because it is too large
@@ -22,7 +22,6 @@ import (
"context"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
@@ -31,7 +30,7 @@ import (
"time"

"github.com/gorilla/mux"
"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v2"
"github.com/minio/minio/internal/auth"
)

@@ -74,7 +73,7 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro

globalEndpoints = mustGetPoolEndpoints(erasureDirs...)

initAllSubsystems()
initAllSubsystems(ctx)

initConfigSubsystem(ctx, objLayer)

@@ -220,7 +219,7 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
adminTestBed.router.ServeHTTP(rec, req)

if rec.Code != http.StatusOK {
resp, _ := ioutil.ReadAll(rec.Body)
resp, _ := io.ReadAll(rec.Body)
t.Errorf("Expected to receive %d status code but received %d. Body (%s)",
http.StatusOK, rec.Code, string(resp))
}

@@ -27,7 +27,7 @@ import (
"sync"
"time"

"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v2"
"github.com/minio/minio/internal/logger"
)

@@ -91,31 +91,27 @@ type allHealState struct {
sync.RWMutex

// map of heal path to heal sequence
healSeqMap map[string]*healSequence // Indexed by endpoint
healLocalDisks map[Endpoint]struct{}
healSeqMap map[string]*healSequence // Indexed by endpoint
// keep track of the healing status of disks in the memory
// false: the disk needs to be healed but no healing routine is started
// true: the disk is currently healing
healLocalDisks map[Endpoint]bool
healStatus map[string]healingTracker // Indexed by disk ID
}

// newHealState - initialize global heal state management
func newHealState(cleanup bool) *allHealState {
func newHealState(ctx context.Context, cleanup bool) *allHealState {
hstate := &allHealState{
healSeqMap: make(map[string]*healSequence),
healLocalDisks: map[Endpoint]struct{}{},
healLocalDisks: make(map[Endpoint]bool),
healStatus: make(map[string]healingTracker),
}
if cleanup {
go hstate.periodicHealSeqsClean(GlobalContext)
go hstate.periodicHealSeqsClean(ctx)
}
return hstate
}

func (ahs *allHealState) healDriveCount() int {
ahs.RLock()
defer ahs.RUnlock()

return len(ahs.healLocalDisks)
}

func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()
@@ -165,23 +161,34 @@ func (ahs *allHealState) getLocalHealingDisks() map[string]madmin.HealingDisk {
return dst
}

// getHealLocalDiskEndpoints() returns the list of disks that need
// to be healed but there is no healing routine in progress on them.
func (ahs *allHealState) getHealLocalDiskEndpoints() Endpoints {
ahs.RLock()
defer ahs.RUnlock()

var endpoints Endpoints
for ep := range ahs.healLocalDisks {
endpoints = append(endpoints, ep)
for ep, healing := range ahs.healLocalDisks {
if !healing {
endpoints = append(endpoints, ep)
}
}
return endpoints
}

func (ahs *allHealState) markDiskForHealing(ep Endpoint) {
ahs.Lock()
defer ahs.Unlock()

ahs.healLocalDisks[ep] = true
}

func (ahs *allHealState) pushHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()

for _, ep := range healLocalDisks {
ahs.healLocalDisks[ep] = struct{}{}
ahs.healLocalDisks[ep] = false
}
}
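Replacing the `map[Endpoint]struct{}` set with `map[Endpoint]bool` lets one map answer both "needs healing" and "healing already running". A minimal sketch of that state tracking, with a plain string standing in for MinIO's `Endpoint` type:

```go
package main

import (
	"fmt"
	"sync"
)

type healState struct {
	mu sync.RWMutex
	// false: disk needs healing, no routine started yet
	// true:  disk is currently being healed
	healLocalDisks map[string]bool
}

// push registers disks as needing healing.
func (h *healState) push(eps ...string) {
	h.mu.Lock()
	defer h.mu.Unlock()
	for _, ep := range eps {
		h.healLocalDisks[ep] = false
	}
}

// markHealing flags a disk as having an active healing routine.
func (h *healState) markHealing(ep string) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.healLocalDisks[ep] = true
}

// pending returns disks still waiting for a healing routine.
func (h *healState) pending() (out []string) {
	h.mu.RLock()
	defer h.mu.RUnlock()
	for ep, healing := range h.healLocalDisks {
		if !healing {
			out = append(out, ep)
		}
	}
	return
}

func main() {
	h := &healState{healLocalDisks: map[string]bool{}}
	h.push("/disk1", "/disk2")
	h.markHealing("/disk1")
	fmt.Println(h.pending()) // only /disk2 remains pending
}
```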
@@ -194,7 +201,6 @@ func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
for {
select {
case <-periodicTimer.C:
periodicTimer.Reset(time.Minute * 5)
now := UTCNow()
ahs.Lock()
for path, h := range ahs.healSeqMap {
@@ -203,6 +209,8 @@ func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
}
}
ahs.Unlock()

periodicTimer.Reset(time.Minute * 5)
case <-ctx.Done():
// server could be restarting - need
// to exit immediately
@@ -214,8 +222,8 @@ func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
// getHealSequenceByToken - Retrieve a heal sequence by token. The second
// argument returns if a heal sequence actually exists.
func (ahs *allHealState) getHealSequenceByToken(token string) (h *healSequence, exists bool) {
ahs.Lock()
defer ahs.Unlock()
ahs.RLock()
defer ahs.RUnlock()
for _, healSeq := range ahs.healSeqMap {
if healSeq.clientToken == token {
return healSeq, true
@@ -227,8 +235,8 @@ func (ahs *allHealState) getHealSequenceByToken(token string) (h *healSequence,
// getHealSequence - Retrieve a heal sequence by path. The second
// argument returns if a heal sequence actually exists.
func (ahs *allHealState) getHealSequence(path string) (h *healSequence, exists bool) {
ahs.Lock()
defer ahs.Unlock()
ahs.RLock()
defer ahs.RUnlock()
h, exists = ahs.healSeqMap[path]
return h, exists
}
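Both lookups above only read the map, so taking `RLock` instead of the exclusive `Lock` allows concurrent readers without changing behavior; the same pattern in isolation:

```go
package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu    sync.RWMutex
	items map[string]string
}

// get only reads, so the read lock suffices and lookups may run in parallel.
func (r *registry) get(token string) (string, bool) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	v, ok := r.items[token]
	return v, ok
}

func main() {
	r := &registry{items: map[string]string{"tok": "heal-seq-1"}}
	fmt.Println(r.get("tok"))
}
```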
@@ -581,12 +589,7 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
// heal-results in memory and the client has not consumed it
// for too long.
unconsumedTimer := time.NewTimer(healUnconsumedTimeout)
defer func() {
// stop the timeout timer so it is garbage collected.
if !unconsumedTimer.Stop() {
<-unconsumedTimer.C
}
}()
defer unconsumedTimer.Stop()

var itemsLen int
for {
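The drain-on-`Stop` dance is only needed when a timer's channel will be read again (for example before a `Reset`); when the function is simply returning, a deferred `Stop` suffices:

```go
package main

import (
	"fmt"
	"time"
)

func watch(done <-chan struct{}) {
	timeout := time.NewTimer(time.Hour)
	// No Reset follows and timeout.C is never read after this function
	// returns, so Stop alone is enough to release the timer.
	defer timeout.Stop()

	select {
	case <-timeout.C:
		fmt.Println("timed out")
	case <-done:
		fmt.Println("done")
	}
}

func main() {
	done := make(chan struct{})
	close(done)
	watch(done)
}
```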
@@ -701,7 +704,7 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
if source.opts != nil {
task.opts = *source.opts
} else {
task.opts.ScanMode = globalHealConfig.ScanMode()
task.opts.ScanMode = madmin.HealNormalScan
}

h.mutex.Lock()
@@ -808,16 +811,6 @@ func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) f
}
}

// healDiskFormat - heals format.json, return value indicates if a
// failure error occurred.
func (h *healSequence) healDiskFormat() error {
if h.isQuitting() {
return errHealStopSignalled
}

return h.queueHealTask(healSource{bucket: SlashSeparator}, madmin.HealItemMetadata)
}

// healBuckets - check for all buckets heal or just particular bucket.
func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
if h.isQuitting() {
@@ -829,7 +822,7 @@ func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
return h.healBucket(objAPI, h.bucket, bucketsOnly)
}

buckets, err := objAPI.ListBuckets(h.ctx)
buckets, err := objAPI.ListBuckets(h.ctx, BucketOptions{})
if err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}
@@ -23,7 +23,7 @@ import (
"github.com/gorilla/mux"
"github.com/klauspost/compress/gzhttp"
"github.com/klauspost/compress/gzip"
"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v2"
"github.com/minio/minio/internal/logger"
)

@@ -60,12 +60,14 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {

// Info operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(gz(httpTraceAll(adminAPI.ServerInfoHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/inspect-data").HandlerFunc(httpTraceHdrs(adminAPI.InspectDataHandler)).Queries("volume", "{volume:.*}", "file", "{file:.*}")
adminRouter.Methods(http.MethodGet, http.MethodPost).Path(adminVersion + "/inspect-data").HandlerFunc(httpTraceAll(adminAPI.InspectDataHandler))

// StorageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(gz(httpTraceAll(adminAPI.StorageInfoHandler)))
// DataUsageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(gz(httpTraceAll(adminAPI.DataUsageInfoHandler)))
// Metrics operation
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/metrics").HandlerFunc(gz(httpTraceAll(adminAPI.MetricsHandler)))

if globalIsDistErasure || globalIsErasure {
// Heal operations
@@ -82,12 +84,19 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {

adminRouter.Methods(http.MethodPost).Path(adminVersion+"/pools/decommission").HandlerFunc(gz(httpTraceAll(adminAPI.StartDecommission))).Queries("pool", "{pool:.*}")
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/pools/cancel").HandlerFunc(gz(httpTraceAll(adminAPI.CancelDecommission))).Queries("pool", "{pool:.*}")

// Rebalance operations
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/rebalance/start").HandlerFunc(gz(httpTraceAll(adminAPI.RebalanceStart)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/rebalance/status").HandlerFunc(gz(httpTraceAll(adminAPI.RebalanceStatus)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/rebalance/stop").HandlerFunc(gz(httpTraceAll(adminAPI.RebalanceStop)))
}

// Profiling operations
// Profiling operations - deprecated API
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(gz(httpTraceAll(adminAPI.StartProfilingHandler))).
Queries("profilerType", "{profilerType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(gz(httpTraceAll(adminAPI.DownloadProfilingHandler)))
// Profiling operations
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/profile").HandlerFunc(gz(httpTraceAll(adminAPI.ProfileHandler)))

// Config KV operations.
if enableConfigOps {
@@ -133,12 +142,18 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-service-accounts").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListServiceAccounts)))
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/delete-service-account").HandlerFunc(gz(httpTraceHdrs(adminAPI.DeleteServiceAccount))).Queries("accessKey", "{accessKey:.*}")

// STS accounts ops
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/temporary-account-info").HandlerFunc(gz(httpTraceHdrs(adminAPI.TemporaryAccountInfo))).Queries("accessKey", "{accessKey:.*}")

// Info policy IAM latest
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(gz(httpTraceHdrs(adminAPI.InfoCannedPolicy))).Queries("name", "{name:.*}")
// List policies latest
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-canned-policies").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListBucketPolicies))).Queries("bucket", "{bucket:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListCannedPolicies)))

// Builtin IAM policy associations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp/builtin/policy-entities").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListPolicyMappingEntities)))

// Remove policy IAM
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-canned-policy").HandlerFunc(gz(httpTraceHdrs(adminAPI.RemoveCannedPolicy))).Queries("name", "{name:.*}")

@@ -147,6 +162,12 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
HandlerFunc(gz(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup))).
Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")

// Attach policies to user or group
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp/builtin/policy/attach").HandlerFunc(gz(httpTraceHdrs(adminAPI.AttachPolicyBuiltin)))

// Detach policies from user or group
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp/builtin/policy/detach").HandlerFunc(gz(httpTraceHdrs(adminAPI.DetachPolicyBuiltin)))

// Remove user IAM
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-user").HandlerFunc(gz(httpTraceHdrs(adminAPI.RemoveUser))).Queries("accessKey", "{accessKey:.*}")
@@ -168,50 +189,88 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
// Set Group Status
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-group-status").HandlerFunc(gz(httpTraceHdrs(adminAPI.SetGroupStatus))).Queries("group", "{group:.*}").Queries("status", "{status:.*}")

if globalIsDistErasure || globalIsErasure {
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
gz(httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler))).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
gz(httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler))).Queries("bucket", "{bucket:.*}")
// Export IAM info to zipped file
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/export-iam").HandlerFunc(httpTraceHdrs(adminAPI.ExportIAM))

// Bucket replication operations
// GetBucketTargetHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
gz(httpTraceHdrs(adminAPI.ListRemoteTargetsHandler))).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
gz(httpTraceHdrs(adminAPI.SetRemoteTargetHandler))).Queries("bucket", "{bucket:.*}")
// RemoveRemoteTargetHandler
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
gz(httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler))).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
// Import IAM info
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-iam").HandlerFunc(httpTraceHdrs(adminAPI.ImportIAM))

// Remote Tier management operations
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.AddTierHandler)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.EditTierHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListTierHandler)))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.RemoveTierHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.VerifyTierHandler)))
// Tier stats
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier-stats").HandlerFunc(gz(httpTraceHdrs(adminAPI.TierStatsHandler)))
// IDentity Provider configuration APIs
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(gz(httpTraceHdrs(adminAPI.AddIdentityProviderCfg)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(gz(httpTraceHdrs(adminAPI.UpdateIdentityProviderCfg)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp-config/{type}").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListIdentityProviderCfg)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(gz(httpTraceHdrs(adminAPI.GetIdentityProviderCfg)))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(gz(httpTraceHdrs(adminAPI.DeleteIdentityProviderCfg)))

// Cluster Replication APIs
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/add").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationAdd)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/remove").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationRemove)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/info").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationInfo)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/metainfo").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationMetaInfo)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/status").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationStatus)))
// LDAP IAM operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp/ldap/policy-entities").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListLDAPPolicyMappingEntities)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp/ldap/policy/{operation}").HandlerFunc(gz(httpTraceHdrs(adminAPI.AttachDetachPolicyLDAP)))
// -- END IAM APIs --

adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/join").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerJoin)))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/peer/bucket-ops").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerBucketOps))).Queries("bucket", "{bucket:.*}").Queries("operation", "{operation:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/iam-item").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerReplicateIAMItem)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/bucket-meta").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerReplicateBucketItem)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/peer/idp-settings").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerGetIDPSettings)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/edit").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationEdit)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/edit").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerEdit)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/remove").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerRemove)))
}
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
gz(httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler))).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
gz(httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler))).Queries("bucket", "{bucket:.*}")

// Bucket replication operations
// GetBucketTargetHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
gz(httpTraceHdrs(adminAPI.ListRemoteTargetsHandler))).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
gz(httpTraceHdrs(adminAPI.SetRemoteTargetHandler))).Queries("bucket", "{bucket:.*}")
// RemoveRemoteTargetHandler
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
gz(httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler))).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
// ReplicationDiff - MinIO extension API
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/replication/diff").HandlerFunc(
gz(httpTraceHdrs(adminAPI.ReplicationDiffHandler))).Queries("bucket", "{bucket:.*}")

// Batch job operations
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/start-job").HandlerFunc(
gz(httpTraceHdrs(adminAPI.StartBatchJob)))

adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-jobs").HandlerFunc(
gz(httpTraceHdrs(adminAPI.ListBatchJobs)))

adminRouter.Methods(http.MethodGet).Path(adminVersion + "/describe-job").HandlerFunc(
gz(httpTraceHdrs(adminAPI.DescribeBatchJob)))

// Bucket migration operations
// ExportBucketMetaHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/export-bucket-metadata").HandlerFunc(
gz(httpTraceHdrs(adminAPI.ExportBucketMetadataHandler)))
// ImportBucketMetaHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-bucket-metadata").HandlerFunc(
gz(httpTraceHdrs(adminAPI.ImportBucketMetadataHandler)))

// Remote Tier management operations
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.AddTierHandler)))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.EditTierHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListTierHandler)))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.RemoveTierHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.VerifyTierHandler)))
// Tier stats
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier-stats").HandlerFunc(gz(httpTraceHdrs(adminAPI.TierStatsHandler)))

// Cluster Replication APIs
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/add").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationAdd)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/remove").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationRemove)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/info").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationInfo)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/metainfo").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationMetaInfo)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/status").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationStatus)))

adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/join").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerJoin)))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/peer/bucket-ops").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerBucketOps))).Queries("bucket", "{bucket:.*}").Queries("operation", "{operation:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/iam-item").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerReplicateIAMItem)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/bucket-meta").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerReplicateBucketItem)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/site-replication/peer/idp-settings").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerGetIDPSettings)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/edit").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationEdit)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/edit").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerEdit)))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/remove").HandlerFunc(gz(httpTraceHdrs(adminAPI.SRPeerRemove)))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/resync/op").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationResyncOp))).Queries("operation", "{operation:.*}")

if globalIsDistErasure {
// Top locks
@@ -221,8 +280,8 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
Queries("paths", "{paths:.*}").HandlerFunc(gz(httpTraceHdrs(adminAPI.ForceUnlockHandler)))
}

adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest").HandlerFunc(httpTraceHdrs(adminAPI.SpeedtestHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/object").HandlerFunc(httpTraceHdrs(adminAPI.ObjectSpeedtestHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest").HandlerFunc(httpTraceHdrs(adminAPI.SpeedTestHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/object").HandlerFunc(httpTraceHdrs(adminAPI.ObjectSpeedTestHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/drive").HandlerFunc(httpTraceHdrs(adminAPI.DriveSpeedtestHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest/net").HandlerFunc(httpTraceHdrs(adminAPI.NetperfHandler))

@@ -238,16 +297,14 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/kms/key/create").HandlerFunc(gz(httpTraceAll(adminAPI.KMSCreateKeyHandler))).Queries("key-id", "{key-id:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/kms/key/status").HandlerFunc(gz(httpTraceAll(adminAPI.KMSKeyStatusHandler)))

if !globalIsGateway {
// Keep obdinfo for backward compatibility with mc
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/obdinfo").
HandlerFunc(gz(httpTraceHdrs(adminAPI.HealthInfoHandler)))
// -- Health API --
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
HandlerFunc(gz(httpTraceHdrs(adminAPI.HealthInfoHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/bandwidth").
HandlerFunc(gz(httpTraceHdrs(adminAPI.BandwidthMonitorHandler)))
}
// Keep obdinfo for backward compatibility with mc
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/obdinfo").
HandlerFunc(gz(httpTraceHdrs(adminAPI.HealthInfoHandler)))
// -- Health API --
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
HandlerFunc(gz(httpTraceHdrs(adminAPI.HealthInfoHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/bandwidth").
HandlerFunc(gz(httpTraceHdrs(adminAPI.BandwidthMonitorHandler)))
}

// If none of the routes match add default error handler routes
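All of these registrations lean on gorilla/mux's method, path, and query matchers; the pattern in isolation, using the new resync route as the example:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	router := mux.NewRouter()
	// The route matches only PUT requests that carry the query parameter;
	// the {operation:.*} placeholder becomes a route variable.
	router.Methods(http.MethodPut).
		Path("/minio/admin/v3/site-replication/resync/op").
		Queries("operation", "{operation:.*}").
		HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "op =", mux.Vars(r)["operation"])
		})

	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodPut,
		"/minio/admin/v3/site-replication/resync/op?operation=start", nil)
	router.ServeHTTP(rec, req)
	fmt.Print(rec.Body.String())
}
```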
@@ -20,18 +20,24 @@ package cmd
import (
"context"
"net/http"
"os"
"runtime"
"runtime/debug"
"strings"
"time"

"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v2"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/logger"
)

// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints from given list of endpoints
func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Request) madmin.ServerProperties {
var localEndpoints Endpoints
addr := r.Host
addr := globalLocalNodeName
if r != nil {
addr = r.Host
}
if globalIsDistErasure {
addr = globalLocalNodeName
}
@@ -40,17 +46,16 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
for _, endpoint := range ep.Endpoints {
nodeName := endpoint.Host
if nodeName == "" {
nodeName = r.Host
nodeName = addr
}
if endpoint.IsLocal {
// Only proceed for local endpoints
network[nodeName] = string(madmin.ItemOnline)
localEndpoints = append(localEndpoints, endpoint)
continue
}
_, present := network[nodeName]
if !present {
if err := isServerResolvable(endpoint, 2*time.Second); err == nil {
if err := isServerResolvable(endpoint, 5*time.Second); err == nil {
network[nodeName] = string(madmin.ItemOnline)
} else {
network[nodeName] = string(madmin.ItemOffline)
|
||||
var memstats runtime.MemStats
|
||||
runtime.ReadMemStats(&memstats)
|
||||
|
||||
gcStats := debug.GCStats{
|
||||
// If stats.PauseQuantiles is non-empty, ReadGCStats fills
|
||||
// it with quantiles summarizing the distribution of pause time.
|
||||
// For example, if len(stats.PauseQuantiles) is 5, it will be
|
||||
// filled with the minimum, 25%, 50%, 75%, and maximum pause times.
|
||||
PauseQuantiles: make([]time.Duration, 5),
|
||||
}
|
||||
debug.ReadGCStats(&gcStats)
|
||||
// Truncate GC stats to max 5 entries.
|
||||
if len(gcStats.PauseEnd) > 5 {
|
||||
gcStats.PauseEnd = gcStats.PauseEnd[len(gcStats.PauseEnd)-5:]
|
||||
}
|
||||
if len(gcStats.Pause) > 5 {
|
||||
gcStats.Pause = gcStats.Pause[len(gcStats.Pause)-5:]
|
||||
}

	props := madmin.ServerProperties{
		State:    string(madmin.ItemInitializing),
		Endpoint: addr,
@@ -78,14 +99,53 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
			Frees:     memstats.Frees,
			HeapAlloc: memstats.HeapAlloc,
		},
		GoMaxProcs:     runtime.GOMAXPROCS(0),
		NumCPU:         runtime.NumCPU(),
		RuntimeVersion: runtime.Version(),
		GCStats: &madmin.GCStats{
			LastGC:     gcStats.LastGC,
			NumGC:      gcStats.NumGC,
			PauseTotal: gcStats.PauseTotal,
			Pause:      gcStats.Pause,
			PauseEnd:   gcStats.PauseEnd,
		},
		MinioEnvVars: make(map[string]string, 10),
	}

	sensitive := map[string]struct{}{
		config.EnvAccessKey:         {},
		config.EnvSecretKey:         {},
		config.EnvRootUser:          {},
		config.EnvRootPassword:      {},
		config.EnvMinIOSubnetAPIKey: {},
		config.EnvKMSSecretKey:      {},
	}
	for _, v := range os.Environ() {
		if !strings.HasPrefix(v, "MINIO") && !strings.HasPrefix(v, "_MINIO") {
			continue
		}
		split := strings.SplitN(v, "=", 2)
		key := split[0]
		value := ""
		if len(split) > 1 {
			value = split[1]
		}

		// Do not send sensitive creds.
		if _, ok := sensitive[key]; ok || strings.Contains(strings.ToLower(key), "password") || strings.HasSuffix(strings.ToLower(key), "key") {
			props.MinioEnvVars[key] = "*** EXISTS, REDACTED ***"
			continue
		}
		props.MinioEnvVars[key] = value
	}
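
The redaction loop above reduces to a small pure function; a hedged sketch of the same rules (redactEnv is an illustrative helper, not part of this patch):

	// redactEnv keeps only MINIO*/_MINIO* variables and masks anything
	// that is explicitly sensitive or merely looks like a secret.
	func redactEnv(environ []string, sensitive map[string]struct{}) map[string]string {
		out := make(map[string]string, len(environ))
		for _, v := range environ {
			if !strings.HasPrefix(v, "MINIO") && !strings.HasPrefix(v, "_MINIO") {
				continue
			}
			key, value, _ := strings.Cut(v, "=")
			lower := strings.ToLower(key)
			if _, ok := sensitive[key]; ok || strings.Contains(lower, "password") || strings.HasSuffix(lower, "key") {
				out[key] = "*** EXISTS, REDACTED ***"
				continue
			}
			out[key] = value
		}
		return out
	}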

	objLayer := newObjectLayerFn()
	if objLayer != nil && !globalIsGateway {
		// only need Disks information in server mode.
		storageInfo, _ := objLayer.LocalStorageInfo(GlobalContext)
	if objLayer != nil {
		storageInfo := objLayer.LocalStorageInfo(GlobalContext)
		props.State = string(madmin.ItemOnline)
		props.Disks = storageInfo.Disks
	} else {
		props.State = string(madmin.ItemOffline)
	}

	return props

@@ -30,7 +30,8 @@ import (
	"github.com/Azure/azure-storage-blob-go/azblob"
	"google.golang.org/api/googleapi"

	minio "github.com/minio/minio-go/v7"
	"github.com/minio/madmin-go/v2"
	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/tags"
	"github.com/minio/minio/internal/auth"
	"github.com/minio/minio/internal/bucket/lifecycle"
@@ -131,7 +132,7 @@ const (
	ErrReplicationNeedsVersioningError
	ErrReplicationBucketNeedsVersioningError
	ErrReplicationDenyEditError
	ErrReplicationNoMatchingRuleError
	ErrReplicationNoExistingObjects
	ErrObjectRestoreAlreadyInProgress
	ErrNoSuchKey
	ErrNoSuchUpload
@@ -150,7 +151,7 @@ const (
	ErrSignatureVersionNotSupported
	ErrBucketNotEmpty
	ErrAllAccessDisabled
	ErrMalformedPolicy
	ErrPolicyInvalidVersion
	ErrMissingFields
	ErrMissingCredTag
	ErrCredMalformed
@@ -194,10 +195,13 @@ const (
	ErrBucketTaggingNotFound
	ErrObjectLockInvalidHeaders
	ErrInvalidTagDirective
	ErrPolicyAlreadyAttached
	ErrPolicyNotAttached
	// Add new error codes here.

	// SSE-S3 related API errors
	// SSE-S3/SSE-KMS related API errors
	ErrInvalidEncryptionMethod
	ErrInvalidEncryptionKeyID

	// Server-Side-Encryption (with Customer provided key) related API errors.
	ErrInsecureSSECustomerRequest
@@ -231,6 +235,7 @@ const (

	// S3 extended errors.
	ErrContentSHA256Mismatch
	ErrContentChecksumMismatch

	// Add new extended error codes here.

@@ -260,14 +265,22 @@ const (
	ErrAdminNoSuchUser
	ErrAdminNoSuchGroup
	ErrAdminGroupNotEmpty
	ErrAdminNoSuchJob
	ErrAdminNoSuchPolicy
	ErrAdminPolicyChangeAlreadyApplied
	ErrAdminInvalidArgument
	ErrAdminInvalidAccessKey
	ErrAdminInvalidSecretKey
	ErrAdminConfigNoQuorum
	ErrAdminConfigTooLarge
	ErrAdminConfigBadJSON
	ErrAdminNoSuchConfigTarget
	ErrAdminConfigEnvOverridden
	ErrAdminConfigDuplicateKeys
	ErrAdminConfigInvalidIDPType
	ErrAdminConfigLDAPValidation
	ErrAdminConfigIDPCfgNameAlreadyExists
	ErrAdminConfigIDPCfgNameDoesNotExist
	ErrAdminCredentialsMismatch
	ErrInsecureClientRequest
	ErrObjectTampered
@@ -280,6 +293,11 @@ const (
	ErrSiteReplicationBucketConfigError
	ErrSiteReplicationBucketMetaError
	ErrSiteReplicationIAMError
	ErrSiteReplicationConfigMissing

	// Pool rebalance errors
	ErrAdminRebalanceAlreadyStarted
	ErrAdminRebalanceNotStarted

	// Bucket Quota error codes
	ErrAdminBucketQuotaExceeded
@@ -383,10 +401,13 @@ const (
	ErrAdminProfilerNotEnabled
	ErrInvalidDecompressedSize
	ErrAddUserInvalidArgument
	ErrAdminResourceInvalidArgument
	ErrAdminAccountNotEligible
	ErrAccountNotEligible
	ErrAdminServiceAccountNotFound
	ErrPostPolicyConditionInvalidFormat

	ErrInvalidChecksum
)

type errorCodeMap map[APIErrorCode]APIError
@@ -691,9 +712,9 @@ var errorCodes = errorCodeMap{
		Description:    "All access to this resource has been disabled.",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrMalformedPolicy: {
	ErrPolicyInvalidVersion: {
		Code:           "MalformedPolicy",
		Description:    "Policy has invalid resource.",
		Description:    "The policy must contain a valid version string",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrMissingFields: {
@@ -883,7 +904,7 @@ var errorCodes = errorCodeMap{
	},
	ErrReplicationRemoteConnectionError: {
		Code:           "XMinioAdminReplicationRemoteConnectionError",
		Description:    "Remote service connection error - please check remote service credentials and target bucket",
		Description:    "Remote service connection error",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrReplicationBandwidthLimitError: {
@@ -891,15 +912,15 @@ var errorCodes = errorCodeMap{
		Description:    "Bandwidth limit for remote target must be at least 100MBps",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrReplicationNoMatchingRuleError: {
		Code:        "XMinioReplicationNoMatchingRule",
		Description: "No matching replication rule found for this object prefix",
	ErrReplicationNoExistingObjects: {
		Code:           "XMinioReplicationNoExistingObjects",
		Description:    "No matching ExistingObjects rule enabled",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrReplicationDenyEditError: {
		Code:           "XMinioReplicationDenyEdit",
		Description:    "Cannot alter local replication config since this server is in a cluster replication setup",
		HTTPStatusCode: http.StatusConflict,
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrBucketRemoteIdenticalToSource: {
		Code: "XMinioAdminRemoteIdenticalToSource",
@@ -1068,6 +1089,11 @@ var errorCodes = errorCodeMap{
		Description:    "The encryption method specified is not supported",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidEncryptionKeyID: {
		Code:           "InvalidRequest",
		Description:    "The specified KMS KeyID contains unsupported characters",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInsecureSSECustomerRequest: {
		Code:        "InvalidRequest",
		Description: "Requests specifying Server Side Encryption with Customer provided keys must be made over a secure connection.",
@@ -1150,11 +1176,16 @@ var errorCodes = errorCodeMap{
		Description:    "The provided 'x-amz-content-sha256' header does not match what was computed.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrContentChecksumMismatch: {
		Code:           "XAmzContentChecksumMismatch",
		Description:    "The provided 'x-amz-checksum' header does not match what was computed.",
		HTTPStatusCode: http.StatusBadRequest,
	},

	// MinIO extensions.
	ErrStorageFull: {
		Code:           "XMinioStorageFull",
		Description:    "Storage backend has reached its minimum free disk threshold. Please delete a few objects to proceed.",
		Description:    "Storage backend has reached its minimum free drive threshold. Please delete a few objects to proceed.",
		HTTPStatusCode: http.StatusInsufficientStorage,
	},
	ErrRequestBodyParse: {
@@ -1165,7 +1196,7 @@ var errorCodes = errorCodeMap{
	ErrObjectExistsAsDirectory: {
		Code:           "XMinioObjectExistsAsDirectory",
		Description:    "Object name already exists as a directory.",
		HTTPStatusCode: http.StatusConflict,
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidObjectName: {
		Code: "XMinioInvalidObjectName",
@@ -1202,6 +1233,11 @@ var errorCodes = errorCodeMap{
		Description:    "The specified group does not exist.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrAdminNoSuchJob: {
		Code:           "XMinioAdminNoSuchJob",
		Description:    "The specified job does not exist.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrAdminGroupNotEmpty: {
		Code:        "XMinioAdminGroupNotEmpty",
		Description: "The specified group is not empty - cannot remove it.",
@@ -1212,6 +1248,12 @@ var errorCodes = errorCodeMap{
		Description:    "The canned policy does not exist.",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrAdminPolicyChangeAlreadyApplied: {
		Code:           "XMinioAdminPolicyChangeAlreadyApplied",
		Description:    "The specified policy change is already in effect.",
		HTTPStatusCode: http.StatusBadRequest,
	},

	ErrAdminInvalidArgument: {
		Code:        "XMinioAdminInvalidArgument",
		Description: "Invalid arguments specified.",
@@ -1238,16 +1280,46 @@ var errorCodes = errorCodeMap{
		maxEConfigJSONSize),
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminNoSuchConfigTarget: {
		Code:           "XMinioAdminNoSuchConfigTarget",
		Description:    "No such named configuration target exists",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminConfigBadJSON: {
		Code:           "XMinioAdminConfigBadJSON",
		Description:    "JSON configuration provided is of incorrect format",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminConfigEnvOverridden: {
		Code:           "XMinioAdminConfigEnvOverridden",
		Description:    "Unable to update config via Admin API due to environment variable override",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminConfigDuplicateKeys: {
		Code:           "XMinioAdminConfigDuplicateKeys",
		Description:    "JSON configuration provided has objects with duplicate keys",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminConfigInvalidIDPType: {
		Code:           "XMinioAdminConfigInvalidIDPType",
		Description:    fmt.Sprintf("Invalid IDP configuration type - must be one of %v", madmin.ValidIDPConfigTypes),
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminConfigLDAPValidation: {
		Code:           "XMinioAdminConfigLDAPValidation",
		Description:    "LDAP Configuration validation failed",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminConfigIDPCfgNameAlreadyExists: {
		Code:           "XMinioAdminConfigIDPCfgNameAlreadyExists",
		Description:    "An IDP configuration with the given name already exists",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminConfigIDPCfgNameDoesNotExist: {
		Code:           "XMinioAdminConfigIDPCfgNameDoesNotExist",
		Description:    "No such IDP configuration exists",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminConfigNotificationTargetsFailed: {
		Code:           "XMinioAdminNotificationTargetsTestFailed",
		Description:    "Configuration update failed due to an unsuccessful attempt to connect to one or more notification servers",
@@ -1312,7 +1384,7 @@ var errorCodes = errorCodeMap{
	ErrSiteReplicationPeerResp: {
		Code:           "XMinioSiteReplicationPeerResp",
		Description:    "Error received when contacting a peer site",
		HTTPStatusCode: http.StatusServiceUnavailable,
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrSiteReplicationBackendIssue: {
		Code: "XMinioSiteReplicationBackendIssue",
@@ -1339,7 +1411,21 @@ var errorCodes = errorCodeMap{
		Description:    "Error while replicating an IAM item",
		HTTPStatusCode: http.StatusServiceUnavailable,
	},

	ErrSiteReplicationConfigMissing: {
		Code:           "XMinioSiteReplicationConfigMissingError",
		Description:    "Site not found in site replication configuration",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminRebalanceAlreadyStarted: {
		Code:           "XMinioAdminRebalanceAlreadyStarted",
		Description:    "Pool rebalance is already started",
		HTTPStatusCode: http.StatusConflict,
	},
	ErrAdminRebalanceNotStarted: {
		Code:           "XMinioAdminRebalanceNotStarted",
		Description:    "Pool rebalance is not started",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrMaximumExpires: {
		Code:        "AuthorizationQueryParametersError",
		Description: "X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds",
@@ -1825,6 +1911,11 @@ var errorCodes = errorCodeMap{
		Description:    "User is not allowed to be same as admin access key",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrAdminResourceInvalidArgument: {
		Code:           "XMinioInvalidResource",
		Description:    "Policy, user or group names are not allowed to begin or end with space characters",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrAdminAccountNotEligible: {
		Code:        "XMinioInvalidIAMCredentials",
		Description: "The administrator key is not eligible for this operation",
@@ -1845,6 +1936,21 @@ var errorCodes = errorCodeMap{
		Description:    "Invalid according to Policy: Policy Condition failed",
		HTTPStatusCode: http.StatusForbidden,
	},
	ErrInvalidChecksum: {
		Code:           "InvalidArgument",
		Description:    "Invalid checksum provided.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrPolicyAlreadyAttached: {
		Code:           "XMinioPolicyAlreadyAttached",
		Description:    "The specified policy is already attached.",
		HTTPStatusCode: http.StatusConflict,
	},
	ErrPolicyNotAttached: {
		Code:           "XMinioPolicyNotAttached",
		Description:    "The specified policy is not found.",
		HTTPStatusCode: http.StatusNotFound,
	},
	// Add your error structure here.
}

@@ -1875,8 +1981,12 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrAdminNoSuchGroup
	case errGroupNotEmpty:
		apiErr = ErrAdminGroupNotEmpty
	case errNoSuchJob:
		apiErr = ErrAdminNoSuchJob
	case errNoSuchPolicy:
		apiErr = ErrAdminNoSuchPolicy
	case errNoPolicyToAttachOrDetach:
		apiErr = ErrAdminPolicyChangeAlreadyApplied
	case errSignatureMismatch:
		apiErr = ErrSignatureDoesNotMatch
	case errInvalidRange:
@@ -1898,6 +2008,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrInvalidEncryptionParameters
	case crypto.ErrInvalidEncryptionMethod:
		apiErr = ErrInvalidEncryptionMethod
	case crypto.ErrInvalidEncryptionKeyID:
		apiErr = ErrInvalidEncryptionKeyID
	case crypto.ErrInvalidCustomerAlgorithm:
		apiErr = ErrInvalidSSECustomerAlgorithm
	case crypto.ErrMissingCustomerKey:
@@ -2015,6 +2127,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrSignatureDoesNotMatch
	case hash.SHA256Mismatch:
		apiErr = ErrContentSHA256Mismatch
	case hash.ChecksumMismatch:
		apiErr = ErrContentChecksumMismatch
	case ObjectTooLarge:
		apiErr = ErrEntityTooLarge
	case ObjectTooSmall:
@@ -2043,7 +2157,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrRemoteDestinationNotFoundError
	case BucketRemoteTargetNotFound:
		apiErr = ErrRemoteTargetNotFoundError
	case BucketRemoteConnectionErr:
	case RemoteTargetConnectionErr:
		apiErr = ErrReplicationRemoteConnectionError
	case BucketRemoteAlreadyExists:
		apiErr = ErrBucketRemoteAlreadyExists
@@ -2165,9 +2279,10 @@ func toAPIError(ctx context.Context, err error) APIError {
	if apiErr.Code == "InternalError" {
		// If we see an internal error try to interpret
		// any underlying errors if possible depending on
		// their internal error types. This code is only
		// useful with gateway implementations.
		// their internal error types.
		switch e := err.(type) {
		case batchReplicationJobError:
			apiErr = APIError(e)
		case InvalidArgument:
			apiErr = APIError{
				Code: "InvalidArgument",
@@ -2220,7 +2335,7 @@ func toAPIError(ctx context.Context, err error) APIError {
			}
		case crypto.Error:
			apiErr = APIError{
				Code:           "XMinIOEncryptionError",
				Code:           "XMinioEncryptionError",
				Description:    e.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
@@ -2230,7 +2345,7 @@ func toAPIError(ctx context.Context, err error) APIError {
				Description:    e.Message,
				HTTPStatusCode: e.StatusCode,
			}
			if globalIsGateway && strings.Contains(e.Message, "KMS is not configured") {
			if strings.Contains(e.Message, "KMS is not configured") {
				apiErr = APIError{
					Code:        "NotImplemented",
					Description: e.Message,
@@ -2254,7 +2369,7 @@ func toAPIError(ctx context.Context, err error) APIError {
				Description:    e.Error(),
				HTTPStatusCode: e.Response().StatusCode,
			}
		// Add more Gateway SDKs here if any in future.
		// Add more other SDK related errors here if any in future.
		default:
			//nolint:gocritic
			if errors.Is(err, errMalformedEncoding) {

@@ -20,7 +20,6 @@ package cmd
import (
	"bytes"
	"encoding/json"
	"encoding/xml"
	"fmt"
	"net/http"
	"net/url"
@@ -30,6 +29,8 @@ import (

	"github.com/minio/minio/internal/crypto"
	xhttp "github.com/minio/minio/internal/http"
	"github.com/minio/minio/internal/logger"
	xxml "github.com/minio/xxml"
)

// Returns a hexadecimal representation of time at the
@@ -64,9 +65,13 @@ func setCommonHeaders(w http.ResponseWriter) {
// Encodes the response headers into XML format.
func encodeResponse(response interface{}) []byte {
	var bytesBuffer bytes.Buffer
	bytesBuffer.WriteString(xml.Header)
	e := xml.NewEncoder(&bytesBuffer)
	e.Encode(response)
	bytesBuffer.WriteString(xxml.Header)
	buf, err := xxml.Marshal(response)
	if err != nil {
		logger.LogIf(GlobalContext, err)
		return nil
	}
	bytesBuffer.Write(buf)
	return bytesBuffer.Bytes()
}
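
For contrast, here is the same buffer-then-marshal pattern written against the standard encoding/xml; the patch swaps in github.com/minio/xxml, which exposes a compatible Header and Marshal (a hedged sketch, not the actual implementation):

	func encodeXML(v interface{}) []byte {
		var buf bytes.Buffer
		buf.WriteString(xml.Header)
		b, err := xml.Marshal(v)
		if err != nil {
			return nil // the handler above logs the error and returns nil
		}
		buf.Write(b)
		return buf.Bytes()
	}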

@@ -199,5 +204,12 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
		lc.SetPredictionHeaders(w, objInfo.ToLifecycleOpts())
	}

	if v, ok := objInfo.UserDefined[ReservedMetadataPrefix+"compression"]; ok {
		if i := strings.LastIndexByte(v, '/'); i >= 0 {
			v = v[i+1:]
		}
		w.Header()[xhttp.MinIOCompressed] = []string{v}
	}

	return nil
}

@@ -29,19 +29,20 @@ import (
	"strings"
	"time"

	"github.com/minio/minio/internal/amztime"
	"github.com/minio/minio/internal/crypto"
	"github.com/minio/minio/internal/handlers"
	"github.com/minio/minio/internal/hash"
	xhttp "github.com/minio/minio/internal/http"
	"github.com/minio/minio/internal/logger"
	xxml "github.com/minio/xxml"
)

const (
	// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
	iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
	maxObjectList  = 1000  // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
	maxDeleteList  = 1000  // Limit number of objects deleted in a delete call.
	maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
	maxPartsList   = 10000 // Limit number of parts in a listPartsResponse.
)
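
amztime.ISO8601Format replaces the direct Format calls on this layout throughout the response generators below. Assuming it wraps the same iso8601TimeFormat constant, its core is roughly:

	func iso8601Format(t time.Time) string {
		// Reply date format with millisecond precision, always in UTC.
		return t.UTC().Format("2006-01-02T15:04:05.000Z")
	}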

// LocationResponse - format for location response.
@@ -162,6 +163,12 @@ type Part struct {
	LastModified string
	ETag         string
	Size         int64

	// Checksum values
	ChecksumCRC32  string `xml:"ChecksumCRC32,omitempty"`
	ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
	ChecksumSHA1   string `xml:"ChecksumSHA1,omitempty"`
	ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
}

// ListPartsResponse - format for list parts response.
@@ -183,6 +190,7 @@ type ListPartsResponse struct {
	MaxParts    int
	IsTruncated bool

	ChecksumAlgorithm string
	// List of parts.
	Parts []Part `xml:"Part"`
}
@@ -252,45 +260,88 @@ type ObjectVersion struct {
}

// MarshalXML - marshal ObjectVersion
func (o ObjectVersion) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
func (o ObjectVersion) MarshalXML(e *xxml.Encoder, start xxml.StartElement) error {
	if o.isDeleteMarker {
		start.Name.Local = "DeleteMarker"
	} else {
		start.Name.Local = "Version"
	}

	type objectVersionWrapper ObjectVersion
	return e.EncodeElement(objectVersionWrapper(o), start)
}
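
The wrapper-type trick above avoids infinite recursion: encoding ObjectVersion directly would re-invoke its own MarshalXML. A minimal sketch of the same pattern with the standard encoding/xml (node is an illustrative type):

	type node struct{ Name string }

	func (n node) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
		start.Name.Local = n.Name // pick the element name dynamically
		type plain node           // plain has no MarshalXML method, so no recursion
		return e.EncodeElement(plain(n), start)
	}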

// StringMap is a map[string]string
type StringMap map[string]string
// DeleteMarkerVersion container for delete marker metadata
type DeleteMarkerVersion struct {
	Key          string
	LastModified string // time string of format "2006-01-02T15:04:05.000Z"

	// Owner of the object.
	Owner Owner

	IsLatest  bool
	VersionID string `xml:"VersionId"`
}

// Metadata metadata items implemented to ensure XML marshaling works.
type Metadata struct {
	Items []struct {
		Key   string
		Value string
	}
}

// Set add items, duplicate items get replaced.
func (s *Metadata) Set(k, v string) {
	for i, item := range s.Items {
		if item.Key == k {
			s.Items[i] = struct {
				Key   string
				Value string
			}{
				Key:   k,
				Value: v,
			}
			return
		}
	}
	s.Items = append(s.Items, struct {
		Key   string
		Value string
	}{
		Key:   k,
		Value: v,
	})
}
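
Usage sketch: unlike a Go map, the slice-backed Metadata keeps a stable element order, which makes the marshaled XML deterministic across responses:

	var m Metadata
	m.Set("X-Amz-Meta-Foo", "1")
	m.Set("X-Amz-Meta-Bar", "2")
	m.Set("X-Amz-Meta-Foo", "3") // replaces the first entry in place, order preserved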

type xmlKeyEntry struct {
	XMLName xml.Name
	Value   string `xml:",chardata"`
}

// MarshalXML - StringMap marshals into XML.
func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	tokens := []xml.Token{start}

	for key, value := range s {
		t := xml.StartElement{}
		t.Name = xml.Name{
			Space: "",
			Local: key,
		}
		tokens = append(tokens, t, xml.CharData(value), xml.EndElement{Name: t.Name})
func (s *Metadata) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if s == nil {
		return nil
	}

	tokens = append(tokens, xml.EndElement{
		Name: start.Name,
	})
	if len(s.Items) == 0 {
		return nil
	}

	for _, t := range tokens {
		if err := e.EncodeToken(t); err != nil {
	if err := e.EncodeToken(start); err != nil {
		return err
	}

	for _, item := range s.Items {
		if err := e.Encode(xmlKeyEntry{
			XMLName: xml.Name{Local: item.Key},
			Value:   item.Value,
		}); err != nil {
			return err
		}
	}

	// flush to ensure tokens are written
	return e.Flush()
	return e.EncodeToken(start.End())
}

// Object container for object metadata
@@ -307,7 +358,7 @@ type Object struct {
	StorageClass string

	// UserMetadata user-defined metadata
	UserMetadata StringMap `xml:"UserMetadata,omitempty"`
	UserMetadata *Metadata `xml:"UserMetadata,omitempty"`
}

// CopyObjectResponse container returns ETag and LastModified of the successfully copied object
@@ -350,6 +401,11 @@ type CompleteMultipartUploadResponse struct {
	Bucket string
	Key    string
	ETag   string

	ChecksumCRC32  string `xml:"ChecksumCRC32,omitempty"`
	ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
	ChecksumSHA1   string `xml:"ChecksumSHA1,omitempty"`
	ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
}

// DeleteError structure.
@@ -425,7 +481,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
	for _, bucket := range buckets {
		listbuckets = append(listbuckets, Bucket{
			Name: bucket.Name,
			CreationDate: bucket.Created.UTC().Format(iso8601TimeFormat),
			CreationDate: amztime.ISO8601Format(bucket.Created.UTC()),
		})
	}

@@ -438,6 +494,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
	versions := make([]ObjectVersion, 0, len(resp.Objects))

	owner := Owner{
		ID:          globalMinioDefaultOwnerID,
		DisplayName: "minio",
@@ -445,12 +502,12 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
	data := ListVersionsResponse{}

	for _, object := range resp.Objects {
		content := ObjectVersion{}
		if object.Name == "" {
			continue
		}
		content := ObjectVersion{}
		content.Key = s3EncodeName(object.Name, encodingType)
		content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
		content.LastModified = amztime.ISO8601Format(object.ModTime.UTC())
		if object.ETag != "" {
			content.ETag = "\"" + object.ETag + "\""
		}
@@ -508,7 +565,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
			continue
		}
		content.Key = s3EncodeName(object.Name, encodingType)
		content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
		content.LastModified = amztime.ISO8601Format(object.ModTime.UTC())
		if object.ETag != "" {
			content.ETag = "\"" + object.ETag + "\""
		}
@@ -557,7 +614,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
			continue
		}
		content.Key = s3EncodeName(object.Name, encodingType)
		content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
		content.LastModified = amztime.ISO8601Format(object.ModTime.UTC())
		if object.ETag != "" {
			content.ETag = "\"" + object.ETag + "\""
		}
@@ -569,16 +626,16 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
		}
		content.Owner = owner
		if metadata {
			content.UserMetadata = make(StringMap)
			content.UserMetadata = &Metadata{}
			switch kind, _ := crypto.IsEncrypted(object.UserDefined); kind {
			case crypto.S3:
				content.UserMetadata[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionAES
				content.UserMetadata.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
			case crypto.S3KMS:
				content.UserMetadata[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionKMS
				content.UserMetadata.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
			case crypto.SSEC:
				content.UserMetadata[xhttp.AmzServerSideEncryptionCustomerAlgorithm] = xhttp.AmzEncryptionAES
				content.UserMetadata.Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, xhttp.AmzEncryptionAES)
			}
			for k, v := range CleanMinioInternalMetadataKeys(object.UserDefined) {
			for k, v := range cleanMinioInternalMetadataKeys(object.UserDefined) {
				if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
					// Do not need to send any internal metadata
					// values to client.
@@ -588,7 +645,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
				if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
					continue
				}
				content.UserMetadata[k] = v
				content.UserMetadata.Set(k, v)
			}
		}
		contents = append(contents, content)
@@ -620,7 +677,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
func generateCopyObjectResponse(etag string, lastModified time.Time) CopyObjectResponse {
	return CopyObjectResponse{
		ETag: "\"" + etag + "\"",
		LastModified: lastModified.UTC().Format(iso8601TimeFormat),
		LastModified: amztime.ISO8601Format(lastModified.UTC()),
	}
}

@@ -628,7 +685,7 @@ func generateCopyObjectResponse(etag string, lastModified time.Time) CopyObjectR
func generateCopyObjectPartResponse(etag string, lastModified time.Time) CopyObjectPartResponse {
	return CopyObjectPartResponse{
		ETag: "\"" + etag + "\"",
		LastModified: lastModified.UTC().Format(iso8601TimeFormat),
		LastModified: amztime.ISO8601Format(lastModified.UTC()),
	}
}

@@ -642,14 +699,20 @@ func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) Initi
}

// generates CompleteMultipartUploadResponse for given bucket, key, location and ETag.
func generateCompleteMultpartUploadResponse(bucket, key, location, etag string) CompleteMultipartUploadResponse {
	return CompleteMultipartUploadResponse{
func generateCompleteMultpartUploadResponse(bucket, key, location string, oi ObjectInfo) CompleteMultipartUploadResponse {
	cs := oi.decryptChecksums()
	c := CompleteMultipartUploadResponse{
		Location: location,
		Bucket:   bucket,
		Key:      key,
		// AWS S3 quotes the ETag in XML, make sure we are compatible here.
		ETag: "\"" + etag + "\"",
		ETag:           "\"" + oi.ETag + "\"",
		ChecksumSHA1:   cs[hash.ChecksumSHA1.String()],
		ChecksumSHA256: cs[hash.ChecksumSHA256.String()],
		ChecksumCRC32:  cs[hash.ChecksumCRC32.String()],
		ChecksumCRC32C: cs[hash.ChecksumCRC32C.String()],
	}
	return c
}

// generates ListPartsResponse from ListPartsInfo.
@@ -674,6 +737,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
	listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker
	listPartsResponse.IsTruncated = partsInfo.IsTruncated
	listPartsResponse.NextPartNumberMarker = partsInfo.NextPartNumberMarker
	listPartsResponse.ChecksumAlgorithm = partsInfo.ChecksumAlgorithm

	listPartsResponse.Parts = make([]Part, len(partsInfo.Parts))
	for index, part := range partsInfo.Parts {
@@ -681,7 +745,11 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
		newPart.PartNumber = part.PartNumber
		newPart.ETag = "\"" + part.ETag + "\""
		newPart.Size = part.Size
		newPart.LastModified = part.LastModified.UTC().Format(iso8601TimeFormat)
		newPart.LastModified = amztime.ISO8601Format(part.LastModified.UTC())
		newPart.ChecksumCRC32 = part.ChecksumCRC32
		newPart.ChecksumCRC32C = part.ChecksumCRC32C
		newPart.ChecksumSHA1 = part.ChecksumSHA1
		newPart.ChecksumSHA256 = part.ChecksumSHA256
		listPartsResponse.Parts[index] = newPart
	}
	return listPartsResponse
@@ -711,7 +779,7 @@ func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMult
		newUpload := Upload{}
		newUpload.UploadID = upload.UploadID
		newUpload.Key = s3EncodeName(upload.Object, encodingType)
		newUpload.Initiated = upload.Initiated.UTC().Format(iso8601TimeFormat)
		newUpload.Initiated = amztime.ISO8601Format(upload.Initiated.UTC())
		listMultipartUploadsResponse.Uploads[index] = newUpload
	}
	return listMultipartUploadsResponse
@@ -728,6 +796,14 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, err
}

func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
	if statusCode == 0 {
		statusCode = 200
	}
	// Similar check to http.checkWriteHeaderCode
	if statusCode < 100 || statusCode > 999 {
		logger.Error(fmt.Sprintf("invalid WriteHeader code %v", statusCode))
		statusCode = http.StatusInternalServerError
	}
	setCommonHeaders(w)
	if mType != mimeNone {
		w.Header().Set(xhttp.ContentType, string(mType))
@@ -791,6 +867,12 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
		err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
	}

	// Similar check to http.checkWriteHeaderCode
	if err.HTTPStatusCode < 100 || err.HTTPStatusCode > 999 {
		logger.Error(fmt.Sprintf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code))
		err.HTTPStatusCode = http.StatusInternalServerError
	}

	// Generate error response.
	errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path,
		w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)

@@ -342,7 +342,7 @@ func registerAPIRouter(router *mux.Router) {
		collectAPIStats("getbucketnotification", maxClients(gz(httpTraceAll(api.GetBucketNotificationHandler))))).Queries("notification", "")
	// ListenNotification
	router.Methods(http.MethodGet).HandlerFunc(
		collectAPIStats("listennotification", maxClients(gz(httpTraceAll(api.ListenNotificationHandler))))).Queries("events", "{events:.*}")
		collectAPIStats("listennotification", gz(httpTraceAll(api.ListenNotificationHandler)))).Queries("events", "{events:.*}")
	// ResetBucketReplicationStatus - MinIO extension API
	router.Methods(http.MethodGet).HandlerFunc(
		collectAPIStats("resetbucketreplicationstatus", maxClients(gz(httpTraceAll(api.ResetBucketReplicationStatusHandler))))).Queries("replication-reset-status", "")
@@ -474,7 +474,7 @@ func registerAPIRouter(router *mux.Router) {

	// ListenNotification
	apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
		collectAPIStats("listennotification", maxClients(gz(httpTraceAll(api.ListenNotificationHandler))))).Queries("events", "{events:.*}")
		collectAPIStats("listennotification", gz(httpTraceAll(api.ListenNotificationHandler)))).Queries("events", "{events:.*}")

	// ListBuckets
	apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(

@@ -35,8 +35,8 @@ func shouldEscape(c byte) bool {

// s3URLEncode is based on Golang's url.QueryEscape() code,
// while considering some S3 exceptions:
//   - Avoid encoding '/' and '*'
//   - Force encoding of '~'
func s3URLEncode(s string) string {
	spaceCount, hexCount := 0, 0
	for i := 0; i < len(s); i++ {
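
A hedged sketch of the S3 exceptions described above, expressed via the standard library for comparison (s3Escape is illustrative, not the actual implementation, which rewrites the escaping loop directly):

	func s3Escape(s string) string {
		s = url.QueryEscape(s)
		s = strings.ReplaceAll(s, "+", "%20")    // S3 wants %20, not '+', for spaces
		s = strings.ReplaceAll(s, "%2F", "/")    // keep '/' literal
		s = strings.ReplaceAll(s, "%2A", "*")    // keep '*' literal
		return strings.ReplaceAll(s, "~", "%7E") // but always encode '~'
	}
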
File diff suppressed because one or more lines are too long
@@ -25,7 +25,6 @@ import (
	"encoding/hex"
	"errors"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
@@ -40,6 +39,7 @@ import (
	xhttp "github.com/minio/minio/internal/http"
	xjwt "github.com/minio/minio/internal/jwt"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/minio/internal/mcontext"
	"github.com/minio/pkg/bucket/policy"
	iampolicy "github.com/minio/pkg/iam/policy"
)
@@ -85,6 +85,8 @@ func isRequestSignStreamingV4(r *http.Request) bool {
}

// Authorization type.
//
//go:generate stringer -type=authType -trimprefix=authType $GOFILE
type authType int

// List of all supported auth types.
@@ -149,9 +151,6 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
		s3Err = isReqAuthenticated(ctx, r, region, serviceS3)
	}
	if s3Err != ErrNone {
		reqInfo := (&logger.ReqInfo{}).AppendTags("requestHeaders", dumpRequest(r))
		ctx := logger.SetReqInfo(ctx, reqInfo)
		logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description), logger.Application)
		return cred, nil, owner, s3Err
	}

@@ -197,13 +196,7 @@ func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
	return claims
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(token string) (map[string]interface{}, error) {
	if token == "" {
		claims := xjwt.NewMapClaims()
		return claims.Map(), nil
	}

func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{}, error) {
	// JWT token for x-amz-security-token is signed with admin
	// secret key, temporary credentials become invalid if
	// server admin credentials change. This is done to ensure
@@ -212,13 +205,19 @@ func getClaimsFromToken(token string) (map[string]interface{}, error) {
	// hijacking the policies. We need to make sure that this is
	// based on an admin credential such that token cannot be decoded
	// on the client side and is treated like an opaque value.
	claims, err := auth.ExtractClaims(token, globalActiveCred.SecretKey)
	claims, err := auth.ExtractClaims(token, secret)
	if err != nil {
		return nil, errAuthentication
		if subtle.ConstantTimeCompare([]byte(secret), []byte(globalActiveCred.SecretKey)) == 1 {
			return nil, errAuthentication
		}
		claims, err = auth.ExtractClaims(token, globalActiveCred.SecretKey)
		if err != nil {
			return nil, errAuthentication
		}
	}

	// If OPA is set, return without any further checks.
	if globalPolicyOPA != nil {
	// If AuthZPlugin is set, return without any further checks.
	if newGlobalAuthZPluginFn() != nil {
		return claims.Map(), nil
	}

@@ -235,54 +234,86 @@ func getClaimsFromToken(token string) (map[string]interface{}, error) {
		logger.LogIf(GlobalContext, err, logger.Application)
		return nil, errAuthentication
	}
	claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
	claims.MapClaims[sessionPolicyNameExtracted] = string(spBytes)
	}

	return claims.Map(), nil
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(token string) (map[string]interface{}, error) {
	return getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
}
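
A hedged note on the retry above: getClaimsFromTokenWithSecret first verifies with the caller-supplied secret and, only when that secret differs from the active server secret (checked in constant time), falls back to the server secret. Usage sketch for a service account (cred is an assumed auth.Credentials value):

	// Service-account tokens are signed with the account's own secret.
	claims, err := getClaimsFromTokenWithSecret(cred.SessionToken, cred.SecretKey)
	if err != nil {
		return nil, errAuthentication
	}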

// Fetch claims in the security token returned by the client and validate the token.
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
	token := getSessionToken(r)
	if token != "" && cred.AccessKey == "" {
		// x-amz-security-token is not allowed for anonymous access.
		return nil, ErrNoAccessKey
	}
	if cred.IsServiceAccount() && token == "" {
		token = cred.SessionToken
	}
	if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {

	if token == "" && cred.IsTemp() {
		// Temporary credentials should always have x-amz-security-token
		return nil, ErrInvalidToken
	}
	claims, err := getClaimsFromToken(token)
	if err != nil {
		return nil, toAPIErrorCode(r.Context(), err)

	if token != "" && !cred.IsTemp() {
		// x-amz-security-token should not be present for static credentials.
		return nil, ErrInvalidToken
	}
	return claims, ErrNone

	if cred.IsTemp() && subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
		// validate token for temporary credentials only.
		return nil, ErrInvalidToken
	}

	secret := globalActiveCred.SecretKey
	if cred.IsServiceAccount() {
		token = cred.SessionToken
		secret = cred.SecretKey
	}

	if token != "" {
		claims, err := getClaimsFromTokenWithSecret(token, secret)
		if err != nil {
			return nil, toAPIErrorCode(r.Context(), err)
		}
		return claims, ErrNone
	}

	claims := xjwt.NewMapClaims()
	return claims.Map(), ErrNone
}
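
A minimal standalone sketch of the decision order above; Cred and tokenErr are illustrative stand-ins for auth.Credentials and the real error codes, not part of the patch:

	import "crypto/subtle"

	// Cred models just the fields the checks above consult.
	type Cred struct {
		AccessKey, SessionToken string
		Temp                    bool
	}

	func tokenErr(token string, c Cred) string {
		switch {
		case token != "" && c.AccessKey == "":
			return "ErrNoAccessKey" // anonymous requests must not carry a token
		case token == "" && c.Temp:
			return "ErrInvalidToken" // STS credentials must send their token
		case token != "" && !c.Temp:
			return "ErrInvalidToken" // static credentials must not send one
		case c.Temp && subtle.ConstantTimeCompare([]byte(token), []byte(c.SessionToken)) != 1:
			return "ErrInvalidToken" // token must match the issued session token
		}
		return "ErrNone"
	}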

// Check request auth type verifies the incoming http request
//   - validates the request signature
//   - validates the policy action if anonymous tests bucket policies if any,
//     for authenticated requests validates IAM policies.
//
// returns APIErrorCode if any to be replied to the client.
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
	_, _, s3Err = checkRequestAuthTypeCredential(ctx, r, action, bucketName, objectName)
	logger.GetReqInfo(ctx).BucketName = bucketName
	logger.GetReqInfo(ctx).ObjectName = objectName

	_, _, s3Err = checkRequestAuthTypeCredential(ctx, r, action)
	return s3Err
}

// Check request auth type verifies the incoming http request
//   - validates the request signature
//   - validates the policy action if anonymous tests bucket policies if any,
//     for authenticated requests validates IAM policies.
// returns APIErrorCode if any to be replied to the client.
// Additionally returns the accessKey used in the request, and if this request is by an admin.
func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err APIErrorCode) {
func authenticateRequest(ctx context.Context, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
	if logger.GetReqInfo(ctx) == nil {
		logger.LogIf(ctx, errors.New("unexpected context.Context does not have a logger.ReqInfo"), logger.Minio)
		return ErrAccessDenied
	}

	var cred auth.Credentials
	var owner bool
	switch getRequestAuthType(r) {
	case authTypeUnknown, authTypeStreamingSigned:
		return cred, owner, ErrSignatureVersionNotSupported
		return ErrSignatureVersionNotSupported
	case authTypePresignedV2, authTypeSignedV2:
		if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
			return cred, owner, s3Err
			return s3Err
		}
		cred, owner, s3Err = getReqAccessKeyV2(r)
	case authTypeSigned, authTypePresigned:
@@ -292,52 +323,67 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
			region = ""
		}
		if s3Err = isReqAuthenticated(ctx, r, region, serviceS3); s3Err != ErrNone {
			return cred, owner, s3Err
			return s3Err
		}
		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
	}
	if s3Err != ErrNone {
		return cred, owner, s3Err
		return s3Err
	}

	// LocationConstraint is valid only for CreateBucketAction.
	var locationConstraint string
	logger.GetReqInfo(ctx).Cred = cred
	logger.GetReqInfo(ctx).Owner = owner

	// region is valid only for CreateBucketAction.
	var region string
	if action == policy.CreateBucketAction {
		// To extract region from XML in request body, get copy of request body.
		payload, err := ioutil.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
		payload, err := io.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
		if err != nil {
			logger.LogIf(ctx, err, logger.Application)
			return cred, owner, ErrMalformedXML
			return ErrMalformedXML
		}

		// Populate payload to extract location constraint.
		r.Body = ioutil.NopCloser(bytes.NewReader(payload))

		var s3Error APIErrorCode
		locationConstraint, s3Error = parseLocationConstraint(r)
		if s3Error != ErrNone {
			return cred, owner, s3Error
		r.Body = io.NopCloser(bytes.NewReader(payload))
		region, s3Err = parseLocationConstraint(r)
		if s3Err != ErrNone {
			return s3Err
		}

		// Populate payload again to handle it in HTTP handler.
		r.Body = ioutil.NopCloser(bytes.NewReader(payload))
	}
	if cred.AccessKey != "" {
		logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
		r.Body = io.NopCloser(bytes.NewReader(payload))
	}

	logger.GetReqInfo(ctx).Region = region

	return s3Err
}

func authorizeRequest(ctx context.Context, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
	reqInfo := logger.GetReqInfo(ctx)
	if reqInfo == nil {
		return ErrAccessDenied
	}

	cred := reqInfo.Cred
	owner := reqInfo.Owner
	region := reqInfo.Region
	bucket := reqInfo.BucketName
	object := reqInfo.ObjectName

	if action != policy.ListAllMyBucketsAction && cred.AccessKey == "" {
		// Anonymous checks are not meant for ListBuckets action
		// Anonymous checks are not meant for ListAllBuckets action
		if globalPolicySys.IsAllowed(policy.Args{
			AccountName: cred.AccessKey,
			Action: action,
			BucketName: bucketName,
			ConditionValues: getConditionValues(r, locationConstraint, "", nil),
			BucketName: bucket,
			ConditionValues: getConditionValues(r, region, "", nil),
			IsOwner: false,
			ObjectName: objectName,
			ObjectName: object,
		}) {
			// Request is allowed return the appropriate access key.
			return cred, owner, ErrNone
			return ErrNone
		}

		if action == policy.ListBucketVersionsAction {
@@ -346,31 +392,31 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
			if globalPolicySys.IsAllowed(policy.Args{
				AccountName: cred.AccessKey,
				Action: policy.ListBucketAction,
				BucketName: bucketName,
				ConditionValues: getConditionValues(r, locationConstraint, "", nil),
				BucketName: bucket,
				ConditionValues: getConditionValues(r, region, "", nil),
				IsOwner: false,
				ObjectName: objectName,
				ObjectName: object,
			}) {
				// Request is allowed return the appropriate access key.
				return cred, owner, ErrNone
				return ErrNone
			}
		}

		return cred, owner, ErrAccessDenied
		return ErrAccessDenied
	}

	if globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName: cred.AccessKey,
		Groups: cred.Groups,
		Action: iampolicy.Action(action),
		BucketName: bucketName,
		BucketName: bucket,
		ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
		ObjectName: objectName,
		ObjectName: object,
		IsOwner: owner,
		Claims: cred.Claims,
	}) {
		// Request is allowed return the appropriate access key.
		return cred, owner, ErrNone
		return ErrNone
	}

	if action == policy.ListBucketVersionsAction {
@@ -380,18 +426,41 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
			AccountName: cred.AccessKey,
			Groups: cred.Groups,
			Action: iampolicy.ListBucketAction,
			BucketName: bucketName,
			BucketName: bucket,
			ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
			ObjectName: objectName,
			ObjectName: object,
			IsOwner: owner,
			Claims: cred.Claims,
		}) {
			// Request is allowed return the appropriate access key.
			return cred, owner, ErrNone
			return ErrNone
		}
	}

	return cred, owner, ErrAccessDenied
	return ErrAccessDenied
}

// Check request auth type verifies the incoming http request
//   - validates the request signature
//   - validates the policy action if anonymous tests bucket policies if any,
//     for authenticated requests validates IAM policies.
//
// returns APIErrorCode if any to be replied to the client.
// Additionally returns the accessKey used in the request, and if this request is by an admin.
func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action policy.Action) (cred auth.Credentials, owner bool, s3Err APIErrorCode) {
	s3Err = authenticateRequest(ctx, r, action)
	reqInfo := logger.GetReqInfo(ctx)
	if reqInfo == nil {
		return cred, owner, ErrAccessDenied
	}

	cred = reqInfo.Cred
	owner = reqInfo.Owner
	if s3Err != ErrNone {
		return cred, owner, s3Err
	}

	return cred, owner, authorizeRequest(ctx, r, action)
}

// Verify if request has valid AWS Signature Version '2'.
@@ -473,11 +542,18 @@ func isSupportedS3AuthType(aType authType) bool {
func setAuthHandler(h http.Handler) http.Handler {
	// handler for validating incoming authorization headers.
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		tc, ok := r.Context().Value(mcontext.ContextTraceKey).(*mcontext.TraceCtxt)

		aType := getRequestAuthType(r)
		if aType == authTypeSigned || aType == authTypeSignedV2 || aType == authTypeStreamingSigned {
			// Verify if date headers are set, if not reject the request
			amzDate, errCode := parseAmzDateHeader(r)
			if errCode != ErrNone {
				if ok {
					tc.FuncName = "handler.Auth"
					tc.ResponseRecorder.LogErrBody = true
				}

				// All our internal APIs are sensitive towards Date
				// header, for all requests where Date header is not
				// present we will reject such clients.
@@ -489,6 +565,11 @@ func setAuthHandler(h http.Handler) http.Handler {
			// or in the future, reject request otherwise.
			curTime := UTCNow()
			if curTime.Sub(amzDate) > globalMaxSkewTime || amzDate.Sub(curTime) > globalMaxSkewTime {
				if ok {
					tc.FuncName = "handler.Auth"
					tc.ResponseRecorder.LogErrBody = true
				}

				writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrRequestTimeTooSkewed), r.URL)
				atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
				return
@@ -498,6 +579,12 @@ func setAuthHandler(h http.Handler) http.Handler {
			h.ServeHTTP(w, r)
			return
		}

		if ok {
			tc.FuncName = "handler.Auth"
			tc.ResponseRecorder.LogErrBody = true
		}

		writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL)
		atomic.AddUint64(&globalHTTPStats.rejectedRequestsAuth, 1)
	})
@@ -537,7 +624,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t

	conditions := getConditionValues(r, "", cred.AccessKey, cred.Claims)
	conditions["object-lock-mode"] = []string{string(retMode)}
	conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
	conditions["object-lock-retain-until-date"] = []string{retDate.UTC().Format(time.RFC3339)}
	if retDays > 0 {
		conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
	}
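
The added UTC() call matters whenever the server clock is not in UTC, since RFC 3339 output otherwise carries a zone offset. A quick illustration:

	t := time.Date(2024, 1, 2, 15, 4, 5, 0, time.FixedZone("IST", 5*3600+1800))
	fmt.Println(t.Format(time.RFC3339))       // 2024-01-02T15:04:05+05:30
	fmt.Println(t.UTC().Format(time.RFC3339)) // 2024-01-02T09:34:05Z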
@@ -577,22 +664,22 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action iampolicy.Action) (s3Err APIErrorCode) {
	var cred auth.Credentials
	var owner bool
	region := globalSite.Region
	switch atype {
	case authTypeUnknown:
		return ErrSignatureVersionNotSupported
	case authTypeSignedV2, authTypePresignedV2:
		cred, owner, s3Err = getReqAccessKeyV2(r)
	case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
		region := globalSite.Region
		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
	}
	if s3Err != ErrNone {
		return s3Err
	}

	if cred.AccessKey != "" {
		logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
	}
	logger.GetReqInfo(ctx).Cred = cred
	logger.GetReqInfo(ctx).Owner = owner
	logger.GetReqInfo(ctx).Region = region

	// Do not check for PutObjectRetentionAction permission,
	// if mode and retain until date are not set.

@@ -21,7 +21,6 @@ import (
	"bytes"
	"context"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
@@ -32,13 +31,19 @@ import (
	iampolicy "github.com/minio/pkg/iam/policy"
)

type nullReader struct{}

func (r *nullReader) Read(b []byte) (int, error) {
	return len(b), nil
}

// Test get request auth type.
func TestGetRequestAuthType(t *testing.T) {
	type testCase struct {
		req   *http.Request
		authT authType
	}
	nopCloser := ioutil.NopCloser(io.LimitReader(&nullReader{}, 1024))
	nopCloser := io.NopCloser(io.LimitReader(&nullReader{}, 1024))
	testCases := []testCase{
		// Test case - 1
		// Check for generic signature v4 header.
@@ -341,7 +346,8 @@ func mustNewSignedEmptyMD5Request(method string, urlStr string, contentLength in
}

func mustNewSignedBadMD5Request(method string, urlStr string, contentLength int64,
	body io.ReadSeeker, t *testing.T) *http.Request {
	body io.ReadSeeker, t *testing.T,
) *http.Request {
	req := mustNewRequest(method, urlStr, contentLength, body, t)
	req.Header.Set("Content-Md5", "YWFhYWFhYWFhYWFhYWFhCg==")
	cred := globalActiveCred
@@ -353,7 +359,10 @@ func mustNewSignedBadMD5Request(method string, urlStr string, contentLength int6

// Tests is requested authenticated function, tests replies for s3 errors.
func TestIsReqAuthenticated(t *testing.T) {
	objLayer, fsDir, err := prepareFS()
	ctx, cancel := context.WithCancel(GlobalContext)
	defer cancel()

	objLayer, fsDir, err := prepareFS(ctx)
	if err != nil {
		t.Fatal(err)
	}
@@ -362,10 +371,7 @@ func TestIsReqAuthenticated(t *testing.T) {
		t.Fatalf("unable to initialize config file, %s", err)
	}

	initAllSubsystems()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	initAllSubsystems(ctx)

	initConfigSubsystem(ctx, objLayer)

@@ -399,7 +405,7 @@ func TestIsReqAuthenticated(t *testing.T) {
	for i, testCase := range testCases {
		s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region, serviceS3)
		if s3Error != testCase.s3Error {
			if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
			if _, err := io.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
				t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
			}
		}
@@ -407,7 +413,10 @@ func TestIsReqAuthenticated(t *testing.T) {
}

func TestCheckAdminRequestAuthType(t *testing.T) {
	objLayer, fsDir, err := prepareFS()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	objLayer, fsDir, err := prepareFS(ctx)
	if err != nil {
		t.Fatal(err)
	}
@@ -433,7 +442,6 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
		{Request: mustNewPresignedV2Request(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
		{Request: mustNewPresignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
	}
	ctx := context.Background()
	for i, testCase := range testCases {
		if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, iampolicy.AllAdminActions, globalSite.Region); s3Error != testCase.ErrCode {
			t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
@@ -445,7 +453,7 @@ func TestValidateAdminSignature(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	objLayer, fsDir, err := prepareFS()
	objLayer, fsDir, err := prepareFS(ctx)
	if err != nil {
		t.Fatal(err)
	}
@@ -455,8 +463,7 @@ func TestValidateAdminSignature(t *testing.T) {
		t.Fatalf("unable to initialize config file, %s", err)
	}

	initAllSubsystems()

	initAllSubsystems(ctx)
	initConfigSubsystem(ctx, objLayer)

	globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)

cmd/authtype_string.go (new file, 32 lines)
@@ -0,0 +1,32 @@
// Code generated by "stringer -type=authType -trimprefix=authType auth-handler.go"; DO NOT EDIT.

package cmd

import "strconv"

func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[authTypeUnknown-0]
_ = x[authTypeAnonymous-1]
_ = x[authTypePresigned-2]
_ = x[authTypePresignedV2-3]
_ = x[authTypePostPolicy-4]
_ = x[authTypeStreamingSigned-5]
_ = x[authTypeSigned-6]
_ = x[authTypeSignedV2-7]
_ = x[authTypeJWT-8]
_ = x[authTypeSTS-9]
}

const _authType_name = "UnknownAnonymousPresignedPresignedV2PostPolicyStreamingSignedSignedSignedV2JWTSTS"

var _authType_index = [...]uint8{0, 7, 16, 25, 36, 46, 61, 67, 75, 78, 81}

func (i authType) String() string {
if i < 0 || i >= authType(len(_authType_index)-1) {
return "authType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _authType_name[_authType_index[i]:_authType_index[i+1]]
}
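The generated String() above follows stringer's standard layout: all constant names are packed into one string (_authType_name) with an offsets array (_authType_index) instead of a []string, so the method allocates nothing. A minimal standalone sketch of the same pattern, using a hypothetical color type rather than anything from this changeset:

```go
package main

import (
	"fmt"
	"strconv"
)

type color uint8

const (
	colorRed color = iota
	colorGreen
	colorBlue
)

// Same layout stringer emits: one packed name string plus offsets into it.
const _color_name = "RedGreenBlue"

var _color_index = [...]uint8{0, 3, 8, 12}

func (i color) String() string {
	if i >= color(len(_color_index)-1) {
		return "color(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _color_name[_color_index[i]:_color_index[i+1]]
}

func main() {
	fmt.Println(colorGreen, color(7)) // prints: Green color(7)
}
```

The out-of-range guard is also why the `var x [1]struct{}` block exists: if the constants are ever renumbered, the array indexing fails to compile, signaling the file must be regenerated with stringer.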
@@ -21,13 +21,14 @@ import (
"context"
"runtime"

"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v2"
)

// healTask represents what to heal along with options
// path: '/' => Heal disk formats along with metadata
// path: 'bucket/' or '/bucket/' => Heal bucket
// path: 'bucket/object' => Heal object
//
// path: '/' => Heal disk formats along with metadata
// path: 'bucket/' or '/bucket/' => Heal bucket
// path: 'bucket/object' => Heal object
type healTask struct {
bucket string
object string
@@ -49,10 +50,10 @@ type healRoutine struct {
workers int
}

func systemIO() int {
func activeListeners() int {
// Bucket notification and http trace are not costly, it is okay to ignore them
// while counting the number of concurrent connections
return int(globalHTTPListen.NumSubscribers()) + int(globalTrace.NumSubscribers())
return int(globalHTTPListen.Subscribers()) + int(globalTrace.Subscribers())
}

func waitForLowHTTPReq() {
@@ -61,7 +62,7 @@ func waitForLowHTTPReq() {
currentIO = httpServer.GetRequestCount
}

globalHealConfig.Wait(currentIO, systemIO)
globalHealConfig.Wait(currentIO, activeListeners)
}

func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {

@@ -26,15 +26,12 @@ import (
"os"
"sort"
"strings"
"sync"
"time"

"github.com/dustin/go-humanize"
"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v2"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/console"
)

const (
@@ -82,6 +79,10 @@ type healingTracker struct {

// Filled during heal.
HealedBuckets []string

// ID of the current healing operation
HealID string

// Add future tracking capabilities
// Be sure that they are included in toHealingDisk
}
@@ -90,14 +91,14 @@ type healingTracker struct {
// The disk ID will be validated against the loaded one.
func loadHealingTracker(ctx context.Context, disk StorageAPI) (*healingTracker, error) {
if disk == nil {
return nil, errors.New("loadHealingTracker: nil disk given")
return nil, errors.New("loadHealingTracker: nil drive given")
}
diskID, err := disk.GetDiskID()
if err != nil {
return nil, err
}
b, err := disk.ReadAll(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename))
pathJoin(bucketMetaPrefix, healingTrackerFilename))
if err != nil {
return nil, err
}
@@ -107,7 +108,7 @@ func loadHealingTracker(ctx context.Context, disk StorageAPI) (*healingTracker,
return nil, err
}
if h.ID != diskID && h.ID != "" {
return nil, fmt.Errorf("loadHealingTracker: disk id mismatch expected %s, got %s", h.ID, diskID)
return nil, fmt.Errorf("loadHealingTracker: drive id mismatch expected %s, got %s", h.ID, diskID)
}
h.disk = disk
h.ID = diskID
@@ -115,11 +116,12 @@ func loadHealingTracker(ctx context.Context, disk StorageAPI) (*healingTracker,
}

// newHealingTracker will create a new healing tracker for the disk.
func newHealingTracker(disk StorageAPI) *healingTracker {
func newHealingTracker(disk StorageAPI, healID string) *healingTracker {
diskID, _ := disk.GetDiskID()
h := healingTracker{
disk: disk,
ID: diskID,
HealID: healID,
Path: disk.String(),
Endpoint: disk.Endpoint().String(),
Started: time.Now().UTC(),
@@ -132,7 +134,7 @@ func newHealingTracker(disk StorageAPI) *healingTracker {
// If the tracker has been deleted an error is returned.
func (h *healingTracker) update(ctx context.Context) error {
if h.disk.Healing() == nil {
return fmt.Errorf("healingTracker: disk %q is not marked as healing", h.ID)
return fmt.Errorf("healingTracker: drive %q is not marked as healing", h.ID)
}
if h.ID == "" || h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
h.ID, _ = h.disk.GetDiskID()
@@ -158,15 +160,19 @@ func (h *healingTracker) save(ctx context.Context) error {
}
globalBackgroundHealState.updateHealStatus(h)
return h.disk.WriteAll(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename),
pathJoin(bucketMetaPrefix, healingTrackerFilename),
htrackerBytes)
}

// delete the tracker on disk.
func (h *healingTracker) delete(ctx context.Context) error {
return h.disk.Delete(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename),
false)
pathJoin(bucketMetaPrefix, healingTrackerFilename),
DeleteOptions{
Recursive: false,
Force: false,
},
)
}

func (h *healingTracker) isHealed(bucket string) bool {
@@ -226,6 +232,7 @@ func (h *healingTracker) printTo(writer io.Writer) {
func (h *healingTracker) toHealingDisk() madmin.HealingDisk {
return madmin.HealingDisk{
ID: h.ID,
HealID: h.HealID,
Endpoint: h.Endpoint,
PoolIndex: h.PoolIndex,
SetIndex: h.SetIndex,
@@ -258,26 +265,9 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {

initBackgroundHealing(ctx, objAPI) // start quick background healing

bgSeq := mustGetHealSequence(ctx)

globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)

if drivesToHeal := globalBackgroundHealState.healDriveCount(); drivesToHeal > 0 {
logger.Info(fmt.Sprintf("Found drives to heal %d, waiting until %s to heal the content - use 'mc admin heal alias/ --verbose' to check the status",
drivesToHeal, defaultMonitorNewDiskInterval))

// Heal any disk format and metadata early, if possible.
// Start with format healing
if err := bgSeq.healDiskFormat(); err != nil {
if newObjectLayerFn() != nil {
// log only in situations, when object layer
// has fully initialized.
logger.LogIf(bgSeq.ctx, err)
}
}
}

go monitorLocalDisksAndHeal(ctx, z, bgSeq)
go monitorLocalDisksAndHeal(ctx, z)
}

func getLocalDisksToHeal() (disksToHeal Endpoints) {
@@ -299,10 +289,131 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {
return disksToHeal
}

var newDiskHealingTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)

func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint) error {
disk, format, err := connectEndpoint(endpoint)
if err != nil {
return fmt.Errorf("Error: %w, %s", err, endpoint)
}
defer disk.Close()
poolIdx := globalEndpoints.GetLocalPoolIdx(disk.Endpoint())
if poolIdx < 0 {
return fmt.Errorf("unexpected pool index (%d) found in %s", poolIdx, disk.Endpoint())
}

// Calculate the set index where the current endpoint belongs
z.serverPools[poolIdx].erasureDisksMu.RLock()
setIdx, _, err := findDiskIndex(z.serverPools[poolIdx].format, format)
z.serverPools[poolIdx].erasureDisksMu.RUnlock()
if err != nil {
return err
}
if setIdx < 0 {
return fmt.Errorf("unexpected set index (%d) found in %s", setIdx, disk.Endpoint())
}

// Prevent parallel erasure set healing
locker := z.NewNSLock(minioMetaBucket, fmt.Sprintf("new-drive-healing/%d/%d", poolIdx, setIdx))
lkctx, err := locker.GetLock(ctx, newDiskHealingTimeout)
if err != nil {
return err
}
ctx = lkctx.Context()
defer locker.Unlock(lkctx)

// Load healing tracker in this disk
tracker, err := loadHealingTracker(ctx, disk)
if err != nil {
// A healing track can be not found when another disk in the same
// erasure set and same healing-id successfully finished healing.
if err == errFileNotFound {
return nil
}
logger.LogIf(ctx, fmt.Errorf("Unable to load a healing tracker on '%s': %w", disk, err))
tracker = newHealingTracker(disk, mustGetUUID())
}

logger.Info(fmt.Sprintf("Proceeding to heal '%s' - 'mc admin heal alias/ --verbose' to check the status.", endpoint))

buckets, _ := z.ListBuckets(ctx, BucketOptions{})
// Buckets data are dispersed in multiple zones/sets, make
// sure to heal all bucket metadata configuration.
buckets = append(buckets, BucketInfo{
Name: pathJoin(minioMetaBucket, minioConfigPrefix),
}, BucketInfo{
Name: pathJoin(minioMetaBucket, bucketMetaPrefix),
})

// Heal latest buckets first.
sort.Slice(buckets, func(i, j int) bool {
a, b := strings.HasPrefix(buckets[i].Name, minioMetaBucket), strings.HasPrefix(buckets[j].Name, minioMetaBucket)
if a != b {
return a
}
return buckets[i].Created.After(buckets[j].Created)
})

if serverDebugLog {
logger.Info("Healing drive '%v' on %s pool", disk, humanize.Ordinal(poolIdx+1))
}

// Load bucket totals
cache := dataUsageCache{}
if err := cache.load(ctx, z.serverPools[poolIdx].sets[setIdx], dataUsageCacheName); err == nil {
dataUsageInfo := cache.dui(dataUsageRoot, nil)
tracker.ObjectsTotalCount = dataUsageInfo.ObjectsTotalCount
tracker.ObjectsTotalSize = dataUsageInfo.ObjectsTotalSize
}

tracker.PoolIndex, tracker.SetIndex, tracker.DiskIndex = disk.GetDiskLoc()
tracker.setQueuedBuckets(buckets)
if err := tracker.save(ctx); err != nil {
return err
}

// Start or resume healing of this erasure set
if err = z.serverPools[poolIdx].sets[setIdx].healErasureSet(ctx, tracker.QueuedBuckets, tracker); err != nil {
return err
}

if tracker.ItemsFailed > 0 {
logger.Info("Healing drive '%s' failed (healed: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsFailed)
} else {
logger.Info("Healing drive '%s' complete (healed: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsFailed)
}

if serverDebugLog {
tracker.printTo(os.Stdout)
logger.Info("\n")
}

if tracker.HealID == "" { // HealID is empty only before Feb 2023
logger.LogIf(ctx, tracker.delete(ctx))
return nil
}

// Remove .healing.bin from all disks with similar heal-id
for _, disk := range z.serverPools[poolIdx].sets[setIdx].getDisks() {
t, err := loadHealingTracker(ctx, disk)
if err != nil {
if err != errFileNotFound {
logger.LogIf(ctx, err)
}
continue
}
if t.HealID == tracker.HealID {
t.delete(ctx)
}
}

return nil
}

// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible to perform the heal
func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq *healSequence) {
func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools) {
// Perform automatic disk healing when a disk is replaced locally.
diskCheckTimer := time.NewTimer(defaultMonitorNewDiskInterval)
defer diskCheckTimer.Stop()
@@ -312,142 +423,37 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
case <-ctx.Done():
return
case <-diskCheckTimer.C:
// Reset to next interval.
diskCheckTimer.Reset(defaultMonitorNewDiskInterval)

var erasureSetInPoolDisksToHeal []map[int][]StorageAPI

healDisks := globalBackgroundHealState.getHealLocalDiskEndpoints()
if len(healDisks) > 0 {
// Reformat disks
bgSeq.queueHealTask(healSource{bucket: SlashSeparator}, madmin.HealItemMetadata)

// Ensure that reformatting disks is finished
bgSeq.queueHealTask(healSource{bucket: nopHeal}, madmin.HealItemMetadata)

logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal - 'mc admin heal alias/ --verbose' to check the status.",
len(healDisks)))

erasureSetInPoolDisksToHeal = make([]map[int][]StorageAPI, len(z.serverPools))
for i := range z.serverPools {
erasureSetInPoolDisksToHeal[i] = map[int][]StorageAPI{}
}
if len(healDisks) == 0 {
// Reset for next interval.
diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
continue
}

if serverDebugLog && len(healDisks) > 0 {
console.Debugf(color.Green("healDisk:")+" disk check timer fired, attempting to heal %d drives\n", len(healDisks))
// Reformat disks immediately
_, err := z.HealFormat(context.Background(), false)
if err != nil && !errors.Is(err, errNoHealRequired) {
logger.LogIf(ctx, err)
// Reset for next interval.
diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
continue
}

// heal only if new disks found.
for _, endpoint := range healDisks {
disk, format, err := connectEndpoint(endpoint)
if err != nil {
printEndpointError(endpoint, err, true)
continue
}

poolIdx := globalEndpoints.GetLocalPoolIdx(disk.Endpoint())
if poolIdx < 0 {
continue
}

// Calculate the set index where the current endpoint belongs
z.serverPools[poolIdx].erasureDisksMu.RLock()
// Protect reading reference format.
setIndex, _, err := findDiskIndex(z.serverPools[poolIdx].format, format)
z.serverPools[poolIdx].erasureDisksMu.RUnlock()
if err != nil {
printEndpointError(endpoint, err, false)
continue
}

erasureSetInPoolDisksToHeal[poolIdx][setIndex] = append(erasureSetInPoolDisksToHeal[poolIdx][setIndex], disk)
}

buckets, _ := z.ListBuckets(ctx)

// Buckets data are dispersed in multiple zones/sets, make
// sure to heal all bucket metadata configuration.
buckets = append(buckets, BucketInfo{
Name: pathJoin(minioMetaBucket, minioConfigPrefix),
}, BucketInfo{
Name: pathJoin(minioMetaBucket, bucketMetaPrefix),
})

// Heal latest buckets first.
sort.Slice(buckets, func(i, j int) bool {
a, b := strings.HasPrefix(buckets[i].Name, minioMetaBucket), strings.HasPrefix(buckets[j].Name, minioMetaBucket)
if a != b {
return a
}
return buckets[i].Created.After(buckets[j].Created)
})

// TODO(klauspost): This will block until all heals are done,
// in the future this should be able to start healing other sets at once.
var wg sync.WaitGroup
for i, setMap := range erasureSetInPoolDisksToHeal {
i := i
for setIndex, disks := range setMap {
if len(disks) == 0 {
continue
for _, disk := range healDisks {
go func(disk Endpoint) {
globalBackgroundHealState.markDiskForHealing(disk)
err := healFreshDisk(ctx, z, disk)
if err != nil {
printEndpointError(disk, err, false)
return
}
wg.Add(1)
go func(setIndex int, disks []StorageAPI) {
defer wg.Done()
for _, disk := range disks {
if serverDebugLog {
logger.Info("Healing disk '%v' on %s pool", disk, humanize.Ordinal(i+1))
}

// So someone changed the drives underneath, healing tracker missing.
tracker, err := loadHealingTracker(ctx, disk)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Healing tracker missing on '%s', disk was swapped again on %s pool: %w",
disk, humanize.Ordinal(i+1), err))
tracker = newHealingTracker(disk)
}

// Load bucket totals
cache := dataUsageCache{}
if err := cache.load(ctx, z.serverPools[i].sets[setIndex], dataUsageCacheName); err == nil {
dataUsageInfo := cache.dui(dataUsageRoot, nil)
tracker.ObjectsTotalCount = dataUsageInfo.ObjectsTotalCount
tracker.ObjectsTotalSize = dataUsageInfo.ObjectsTotalSize
}

tracker.PoolIndex, tracker.SetIndex, tracker.DiskIndex = disk.GetDiskLoc()
tracker.setQueuedBuckets(buckets)
if err := tracker.save(ctx); err != nil {
logger.LogIf(ctx, err)
// Unable to write healing tracker, permission denied or some
// other unexpected error occurred. Proceed to look for new
// disks to be healed again, we cannot proceed further.
return
}

err = z.serverPools[i].sets[setIndex].healErasureSet(ctx, tracker.QueuedBuckets, tracker)
if err != nil {
logger.LogIf(ctx, err)
continue
}

if serverDebugLog {
logger.Info("Healing disk '%s' on %s pool, %s set complete", disk,
humanize.Ordinal(i+1), humanize.Ordinal(setIndex+1))
logger.Info("Summary:\n")
tracker.printTo(os.Stdout)
logger.Info("\n")
}
logger.LogIf(ctx, tracker.delete(ctx))

// Only upon success pop the healed disk.
globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
}
}(setIndex, disks)
}
// Only upon success pop the healed disk.
globalBackgroundHealState.popHealLocalDisks(disk)
}(disk)
}
wg.Wait()

// Reset for next interval.
diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
}
}
}

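The rewritten monitor loop above replaces the old per-set sync.WaitGroup fan-out with one goroutine per fresh endpoint: each goroutine marks the drive as healing, runs healFreshDisk, and pops it from the pending list only on success, so a failed drive is simply retried on the next timer tick. A minimal standalone sketch of that mark/heal/pop pattern; the `queue` type here is a hypothetical stand-in for globalBackgroundHealState, not MinIO code:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// queue stands in for the local-disk heal list: entries stay
// queued until a worker succeeds, so failures are retried later.
type queue struct {
	mu      sync.Mutex
	pending map[string]bool
}

func (q *queue) mark(d string) { q.mu.Lock(); q.pending[d] = true; q.mu.Unlock() }
func (q *queue) pop(d string)  { q.mu.Lock(); delete(q.pending, d); q.mu.Unlock() }

// heal simulates healing one drive.
func heal(d string) error { time.Sleep(10 * time.Millisecond); return nil }

func main() {
	q := &queue{pending: map[string]bool{}}
	for _, d := range []string{"/mnt/disk1", "/mnt/disk2"} {
		go func(d string) {
			q.mark(d)
			if err := heal(d); err != nil {
				return // stays pending; retried on the next tick
			}
			q.pop(d) // only popped on success, like popHealLocalDisks
		}(d)
	}
	time.Sleep(50 * time.Millisecond)
	fmt.Println("pending:", len(q.pending)) // 0 once both heals succeed
}
```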
@@ -182,6 +182,12 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
}
case "HealID":
z.HealID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HealID")
return
}
default:
err = dc.Skip()
if err != nil {
@@ -195,9 +201,9 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {

// EncodeMsg implements msgp.Encodable
func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 22
// map header, size 23
// write "ID"
err = en.Append(0xde, 0x0, 0x16, 0xa2, 0x49, 0x44)
err = en.Append(0xde, 0x0, 0x17, 0xa2, 0x49, 0x44)
if err != nil {
return
}
@@ -430,15 +436,25 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
return
}
}
// write "HealID"
err = en.Append(0xa6, 0x48, 0x65, 0x61, 0x6c, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.HealID)
if err != nil {
err = msgp.WrapError(err, "HealID")
return
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 22
// map header, size 23
// string "ID"
o = append(o, 0xde, 0x0, 0x16, 0xa2, 0x49, 0x44)
o = append(o, 0xde, 0x0, 0x17, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
// string "PoolIndex"
o = append(o, 0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
@@ -509,6 +525,9 @@ func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
for za0002 := range z.HealedBuckets {
o = msgp.AppendString(o, z.HealedBuckets[za0002])
}
// string "HealID"
o = append(o, 0xa6, 0x48, 0x65, 0x61, 0x6c, 0x49, 0x44)
o = msgp.AppendString(o, z.HealID)
return
}

@@ -688,6 +707,12 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
}
case "HealID":
z.HealID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealID")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
@@ -710,5 +735,6 @@ func (z *healingTracker) Msgsize() (s int) {
for za0002 := range z.HealedBuckets {
s += msgp.StringPrefixSize + len(z.HealedBuckets[za0002])
}
s += 7 + msgp.StringPrefixSize + len(z.HealID)
return
}

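In the generated encoder above, the byte-level change is the MessagePack map header: 0xde marks a map16, the next two bytes are the big-endian pair count (bumped from 0x16 = 22 to 0x17 = 23 for the new HealID field), and 0xa6 is a fixstr marker for the 6-byte key "HealID". That is also where the `7 +` in Msgsize comes from: one marker byte plus six key bytes. A standalone sketch decoding those headers, independent of the msgp library:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// map16 header as emitted above: marker 0xde + big-endian entry count.
	hdr := []byte{0xde, 0x00, 0x17}
	fmt.Println("map entries:", binary.BigEndian.Uint16(hdr[1:])) // 23

	// fixstr header: high bits 101, low 5 bits carry the string length.
	key := []byte{0xa6, 'H', 'e', 'a', 'l', 'I', 'D'}
	fmt.Println("key length:", key[0]&0x1f, "key:", string(key[1:])) // 6 HealID
}
```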
cmd/batch-handlers.go (new file, 1228 lines): file diff suppressed because it is too large
cmd/batch-handlers_gen.go (new file, 2926 lines): file diff suppressed because it is too large
cmd/batch-handlers_gen_test.go (new file, 1253 lines): file diff suppressed because it is too large
cmd/batchreplicationmetric_string.go (new file, 23 lines)
@@ -0,0 +1,23 @@
// Code generated by "stringer -type=batchReplicationMetric -trimprefix=batchReplicationMetric batch-handlers.go"; DO NOT EDIT.

package cmd

import "strconv"

func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[batchReplicationMetricObject-0]
}

const _batchReplicationMetric_name = "Object"

var _batchReplicationMetric_index = [...]uint8{0, 6}

func (i batchReplicationMetric) String() string {
if i >= batchReplicationMetric(len(_batchReplicationMetric_index)-1) {
return "batchReplicationMetric(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _batchReplicationMetric_name[_batchReplicationMetric_index[i]:_batchReplicationMetric_index[i+1]]
}
@@ -25,7 +25,7 @@ import (
"strconv"
"testing"

humanize "github.com/dustin/go-humanize"
"github.com/dustin/go-humanize"
)

// Benchmark utility functions for ObjectLayer.PutObject().
@@ -35,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
err = obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -76,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
object := getRandomObjectName()

// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
err = obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -85,12 +85,12 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {

// PutObjectPart returns etag of the object inserted.
// etag variable is assigned with that value.
var etag, uploadID string
var etag string
// get text data generated for number of bytes equal to object size.
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to written is required as input for NewMultipartUpload.
uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, ObjectOptions{})
res, err := obj.NewMultipartUpload(context.Background(), bucket, object, ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -113,7 +113,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
}
md5hex := getMD5Hash(textPartData)
var partInfo PartInfo
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, res.UploadID, j,
mustGetPutObjReader(b, bytes.NewReader(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
@@ -196,7 +196,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
if err != nil {
b.Fatal(err)
}

@@ -24,6 +24,7 @@ import (
"fmt"
"hash"
"io"
"strings"
"sync"

xhttp "github.com/minio/minio/internal/http"
@@ -146,6 +147,16 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
// Can never happen unless there are programmer bugs
return 0, errUnexpected
}
ignoredErrs := []error{
errDiskNotFound,
}
if strings.HasPrefix(b.volume, minioMetaBucket) {
ignoredErrs = append(ignoredErrs,
errFileNotFound,
errVolumeNotFound,
errFileVersionNotFound,
)
}
if b.rc == nil {
// For the first ReadAt() call we need to open the stream for reading.
b.currOffset = offset
@@ -153,9 +164,11 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
if len(b.data) == 0 && b.tillOffset != streamOffset {
b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
if err != nil {
logger.LogIf(GlobalContext,
fmt.Errorf("Error(%w) reading erasure shards at (%s: %s/%s), will attempt to reconstruct if we have quorum",
err, b.disk, b.volume, b.filePath))
if !IsErr(err, ignoredErrs...) {
logger.LogIf(GlobalContext,
fmt.Errorf("Reading erasure shards at (%s: %s/%s) returned '%w', will attempt to reconstruct if we have quorum",
b.disk, b.volume, b.filePath, err))
}
}
} else {
b.rc = io.NewSectionReader(bytes.NewReader(b.data), streamOffset, b.tillOffset-streamOffset)
@@ -180,7 +193,7 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
b.h.Write(buf)

if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s - content hash does not match - expected %s, got %s",
logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s -> %s/%s - content hash does not match - expected %s, got %s",
b.disk, b.volume, b.filePath, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))))
return 0, errFileCorrupt
}

@@ -38,12 +38,12 @@ type wholeBitrotWriter struct {
func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
err := b.disk.AppendFile(context.TODO(), b.volume, b.filePath, p)
if err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s returned %w", b.disk, err))
return 0, err
}
_, err = b.Hash.Write(p)
if err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s returned %w", b.disk, err))
return 0, err
}
return len(p), nil
@@ -72,12 +72,12 @@ func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error)
if b.buf == nil {
b.buf = make([]byte, b.tillOffset-offset)
if _, err := b.disk.ReadFile(context.TODO(), b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, err))
logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, err))
return 0, err
}
}
if len(b.buf) < len(buf) {
logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, errLessData))
logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, errLessData))
return 0, errLessData
}
n = copy(buf, b.buf)

@@ -19,7 +19,6 @@ package cmd

import (
"bytes"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
@@ -27,6 +26,7 @@ import (
"io"

"github.com/minio/highwayhash"
"github.com/minio/minio/internal/hash/sha256"
"golang.org/x/crypto/blake2b"

xioutil "github.com/minio/minio/internal/ioutil"

@@ -20,17 +20,11 @@ package cmd
import (
"context"
"io"
"io/ioutil"
"os"
"testing"
)

func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
tmpDir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
tmpDir := t.TempDir()

volume := "testvol"
filePath := "testfile"
@@ -60,7 +54,9 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
if err != nil {
t.Fatal(err)
}
writer.(io.Closer).Close()
if bw, ok := writer.(io.Closer); ok {
bw.Close()
}

reader := newBitrotReader(disk, nil, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
b := make([]byte, 10)
@@ -76,6 +72,9 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
if _, err = reader.ReadAt(b[:5], 30); err != nil {
t.Fatal(err)
}
if br, ok := reader.(io.Closer); ok {
br.Close()
}
}

func TestAllBitrotAlgorithms(t *testing.T) {

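Two stock Go idioms carry the test cleanup above: t.TempDir() hands back a directory the testing package deletes automatically, and the checked type assertion closes a value only when it really implements io.Closer (the old unchecked `writer.(io.Closer)` would panic otherwise). A minimal standalone sketch:

```go
package main

import (
	"io"
	"os"
	"testing"
)

// closeIfCloser closes v only when it implements io.Closer,
// mirroring the checked assertions in the bitrot tests.
func closeIfCloser(v interface{}) {
	if c, ok := v.(io.Closer); ok {
		c.Close()
	}
}

func TestTempDirCleanup(t *testing.T) {
	dir := t.TempDir() // removed automatically when the test ends
	f, err := os.CreateTemp(dir, "sample-*")
	if err != nil {
		t.Fatal(err)
	}
	closeIfCloser(f) // *os.File implements io.Closer, so this closes it
}
```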
@@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@@ -64,6 +64,7 @@ func (s1 ServerSystemConfig) Diff(s2 ServerSystemConfig) error {
return fmt.Errorf("Expected platform '%s', found to be running '%s'",
s1.MinioPlatform, s2.MinioPlatform)
}

if s1.MinioEndpoints.NEndpoints() != s2.MinioEndpoints.NEndpoints() {
return fmt.Errorf("Expected number of endpoints %d, seen %d", s1.MinioEndpoints.NEndpoints(),
s2.MinioEndpoints.NEndpoints())
@@ -200,15 +201,18 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS
srcCfg := getServerSystemCfg()
clnts := newBootstrapRESTClients(endpointServerPools)
var onlineServers int
var offlineEndpoints []string
var offlineEndpoints []error
var incorrectConfigs []error
var retries int
for onlineServers < len(clnts)/2 {
for _, clnt := range clnts {
if err := clnt.Verify(ctx, srcCfg); err != nil {
if !isNetworkError(err) {
logger.LogIf(ctx, fmt.Errorf("%s has incorrect configuration: %w", clnt.String(), err))
logger.LogOnceIf(ctx, fmt.Errorf("%s has incorrect configuration: %w", clnt.String(), err), clnt.String())
incorrectConfigs = append(incorrectConfigs, fmt.Errorf("%s has incorrect configuration: %w", clnt.String(), err))
} else {
offlineEndpoints = append(offlineEndpoints, fmt.Errorf("%s is unreachable: %w", clnt.String(), err))
}
offlineEndpoints = append(offlineEndpoints, clnt.String())
continue
}
onlineServers++
@@ -221,15 +225,19 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS
// 100% CPU when half the endpoints are offline.
time.Sleep(100 * time.Millisecond)
retries++
// after 5 retries start logging that servers are not reachable yet
if retries >= 5 {
logger.Info(fmt.Sprintf("Waiting for atleast %d remote servers to be online for bootstrap check", len(clnts)/2))
// after 20 retries start logging that servers are not reachable yet
if retries >= 20 {
logger.Info(fmt.Sprintf("Waiting for atleast %d remote servers with valid configuration to be online", len(clnts)/2))
if len(offlineEndpoints) > 0 {
logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
}
if len(incorrectConfigs) > 0 {
logger.Info(fmt.Sprintf("Following servers mismatch in their configuration %s", incorrectConfigs))
}
retries = 0 // reset to log again after 5 retries.
}
offlineEndpoints = nil
incorrectConfigs = nil
}
}
return nil

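The bootstrap loop above now separates peers that are merely unreachable (network errors, retried quietly) from peers whose configuration genuinely disagrees, and reports both lists every 20 retries. A standalone sketch of that classification, where `verify` is a hypothetical probe standing in for clnt.Verify:

```go
package main

import (
	"errors"
	"fmt"
	"net"
)

// classify mirrors the verify loop above: network errors mean the peer
// is offline and worth retrying; any other error is a config mismatch.
func classify(peers []string, verify func(string) error) (online int, offline, badCfg []error) {
	for _, p := range peers {
		err := verify(p)
		switch {
		case err == nil:
			online++
		case errors.As(err, new(net.Error)):
			offline = append(offline, fmt.Errorf("%s is unreachable: %w", p, err))
		default:
			badCfg = append(badCfg, fmt.Errorf("%s has incorrect configuration: %w", p, err))
		}
	}
	return
}

func main() {
	verify := func(p string) error {
		if p == "peer2" {
			return fmt.Errorf("time skew detected")
		}
		return nil
	}
	online, offline, badCfg := classify([]string{"peer1", "peer2"}, verify)
	fmt.Println(online, offline, badCfg) // 1 [] [peer2 has incorrect configuration: time skew detected]
}
```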
@@ -27,7 +27,7 @@ import (

"github.com/gorilla/mux"
"github.com/minio/kes"
"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v2"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/bucket/policy"
@@ -65,7 +65,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
}

// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -90,7 +90,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
kmsKey := encConfig.KeyID()
if kmsKey != "" {
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
_, err := GlobalKMS.GenerateKey(kmsKey, kmsContext)
_, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) {
writeErrorResponse(ctx, w, toAPIError(ctx, errKMSKeyNotFound), r.URL)
@@ -108,7 +108,8 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
}

// Store the bucket encryption configuration in the object layer
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, configData); err != nil {
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, configData)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -122,6 +123,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket,
SSEConfig: &cfgStr,
UpdatedAt: updatedAt,
}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -153,12 +155,12 @@ func (api objectAPIHandlers) GetBucketEncryptionHandler(w http.ResponseWriter, r

// Check if bucket exists
var err error
if _, err = objAPI.GetBucketInfo(ctx, bucket); err != nil {
if _, err = objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

config, err := globalBucketMetadataSys.GetSSEConfig(bucket)
config, _, err := globalBucketMetadataSys.GetSSEConfig(bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -196,13 +198,14 @@ func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter

// Check if bucket exists
var err error
if _, err = objAPI.GetBucketInfo(ctx, bucket); err != nil {
if _, err = objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

// Delete bucket encryption config from object layer
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, nil); err != nil {
updatedAt, err := globalBucketMetadataSys.Delete(ctx, bucket, bucketSSEConfig)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -212,6 +215,7 @@ func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter
Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket,
SSEConfig: nil,
UpdatedAt: updatedAt,
}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return

@@ -34,16 +34,8 @@ func NewBucketSSEConfigSys() *BucketSSEConfigSys {

// Get - gets bucket encryption config for the given bucket.
func (sys *BucketSSEConfigSys) Get(bucket string) (*sse.BucketSSEConfig, error) {
if globalIsGateway {
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}

return nil, BucketSSEConfigNotFound{Bucket: bucket}
}

return globalBucketMetadataSys.GetSSEConfig(bucket)
sseCfg, _, err := globalBucketMetadataSys.GetSSEConfig(bucket)
return sseCfg, err
}

// validateBucketSSEConfig parses bucket encryption configuration and validates if it is supported by MinIO.

@@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@@ -21,7 +21,6 @@ import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"encoding/xml"
"fmt"
"io"
@@ -33,12 +32,11 @@ import (
"strconv"
"strings"
"sync"
"time"

"github.com/google/uuid"
"github.com/gorilla/mux"

"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v2"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/tags"
sse "github.com/minio/minio/internal/bucket/encryption"
@@ -69,11 +67,16 @@ const (
// - Check if a bucket has an entry in etcd backend
// -- If no, make an entry
// -- If yes, check if the entry matches local IP check if we
// need to update the entry then proceed to update
//
// need to update the entry then proceed to update
//
// -- If yes, check if the IP of entry matches local IP.
// This means entry is for this instance.
//
// This means entry is for this instance.
//
// -- If IP of the entry doesn't match, this means entry is
// for another instance. Log an error to console.
//
// for another instance. Log an error to console.
func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
if len(buckets) == 0 {
return
@@ -203,7 +206,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *

getBucketInfo := objectAPI.GetBucketInfo

if _, err := getBucketInfo(ctx, bucket); err != nil {
if _, err := getBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -229,7 +232,6 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
// using the Initiate Multipart Upload request, but has not yet been
// completed or aborted. This operation returns at most 1,000 multipart
// uploads in the response.
//
func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListMultipartUploads")

@@ -298,7 +300,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R

listBuckets := objectAPI.ListBuckets

cred, owner, s3Error := checkRequestAuthTypeCredential(ctx, r, policy.ListAllMyBucketsAction, "", "")
cred, owner, s3Error := checkRequestAuthTypeCredential(ctx, r, policy.ListAllMyBucketsAction)
if s3Error != ErrNone && s3Error != ErrAccessDenied {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
@@ -334,7 +336,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
} else {
// Invoke the list buckets.
var err error
bucketsInfo, err = listBuckets(ctx)
bucketsInfo, err = listBuckets(ctx, BucketOptions{})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -364,6 +366,18 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
}) {
bucketsInfo[n] = bucketInfo
n++
} else if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.GetBucketLocationAction,
BucketName: bucketInfo.Name,
ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
IsOwner: owner,
ObjectName: "",
Claims: cred.Claims,
}) {
bucketsInfo[n] = bucketInfo
n++
}
}
bucketsInfo = bucketsInfo[:n]
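The widened filter above lists a bucket when the caller holds either ListBucketAction or, failing that, GetBucketLocationAction on it. A standalone sketch of that OR-fallback, with a hypothetical `allowed` callback standing in for globalIAMSys.IsAllowed:

```go
package main

import "fmt"

type action string

const (
	listBucketAction        action = "s3:ListBucket"
	getBucketLocationAction action = "s3:GetBucketLocation"
)

// visibleBuckets keeps a bucket if either action is granted,
// mirroring the else-if fallback in the handler above.
func visibleBuckets(all []string, allowed func(action, string) bool) []string {
	var out []string
	for _, b := range all {
		if allowed(listBucketAction, b) || allowed(getBucketLocationAction, b) {
			out = append(out, b)
		}
	}
	return out
}

func main() {
	allowed := func(a action, b string) bool {
		return b == "public" || (a == getBucketLocationAction && b == "logs")
	}
	fmt.Println(visibleBuckets([]string{"public", "logs", "private"}, allowed))
	// [public logs]
}
```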
@@ -417,7 +431,6 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Unmarshal list of keys to be deleted.
deleteObjectsReq := &DeleteObjectsRequest{}
if err := xmlDecoder(r.Body, deleteObjectsReq, maxBodySize); err != nil {
logger.LogIf(ctx, err, logger.Application)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -437,7 +450,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, "")

// Before proceeding validate if bucket exists.
_, err := objectAPI.GetBucketInfo(ctx, bucket)
_, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -471,9 +484,6 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
hasLockEnabled = true
}

versioned := globalBucketVersioningSys.Enabled(bucket)
suspended := globalBucketVersioningSys.Suspended(bucket)

type deleteResult struct {
delInfo DeletedObject
errInfo DeleteError
@@ -481,8 +491,8 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

deleteResults := make([]deleteResult, len(deleteObjectsReq.Objects))

vc, _ := globalBucketVersioningSys.Get(bucket)
oss := make([]*objSweeper, len(deleteObjectsReq.Objects))

for index, object := range deleteObjectsReq.Objects {
if apiErrCode := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); apiErrCode != ErrNone {
if apiErrCode == ErrSignatureDoesNotMatch || apiErrCode == ErrInvalidAccessKeyID {
@@ -500,11 +510,10 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}
if object.VersionID != "" && object.VersionID != nullVersionID {
if _, err := uuid.Parse(object.VersionID); err != nil {
logger.LogIf(ctx, fmt.Errorf("invalid version-id specified %w", err))
apiErr := errorCodes.ToAPIErr(ErrNoSuchVersion)
deleteResults[index].errInfo = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Message: fmt.Sprintf("%s (%s)", apiErr.Description, err),
Key: object.ObjectName,
VersionID: object.VersionID,
}
@@ -514,8 +523,8 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

opts := ObjectOptions{
VersionID: object.VersionID,
Versioned: versioned,
VersionSuspended: suspended,
Versioned: vc.PrefixEnabled(object.ObjectName),
VersionSuspended: vc.Suspended(),
}

if replicateDeletes || object.VersionID != "" && hasLockEnabled || !globalTierConfigMgr.Empty() {
@@ -526,7 +535,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}

if !globalTierConfigMgr.Empty() {
oss[index] = newObjSweeper(bucket, object.ObjectName).WithVersion(opts.VersionID).WithVersioning(versioned, suspended)
oss[index] = newObjSweeper(bucket, object.ObjectName).WithVersion(opts.VersionID).WithVersioning(opts.Versioned, opts.VersionSuspended)
oss[index].SetTransitionState(goi.TransitionedObject)
}

@@ -581,8 +590,8 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

deleteList := toNames(objectsToDelete)
dObjects, errs := deleteObjectsFn(ctx, bucket, deleteList, ObjectOptions{
Versioned: versioned,
VersionSuspended: suspended,
PrefixEnabledFn: vc.PrefixEnabled,
VersionSuspended: vc.Suspended(),
})

for i := range errs {
@@ -638,22 +647,13 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
continue
}

if replicateDeletes {
if dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == Pending {
dv := DeletedObjectReplicationInfo{
DeletedObject: dobj,
Bucket: bucket,
}
scheduleReplicationDelete(ctx, dv, objectAPI)
if replicateDeletes && (dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == Pending) {
dv := DeletedObjectReplicationInfo{
DeletedObject: dobj,
Bucket: bucket,
EventType: ReplicateIncomingDelete,
}
}

}

// Notify deleted event for objects.
for _, dobj := range deletedObjects {
if dobj.ObjectName == "" {
continue
scheduleReplicationDelete(ctx, dv, objectAPI)
}

eventName := event.ObjectRemovedDelete
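The delete handler above stops consulting one bucket-wide `versioned` flag and instead asks the versioning configuration per object name, since versioning can be scoped to exclude specific prefixes. A standalone sketch of that per-prefix decision; the `versioningConfig` type here is a simplified stand-in, not MinIO's actual type:

```go
package main

import (
	"fmt"
	"strings"
)

// versioningConfig is a hypothetical reduction of the bucket
// versioning state: enabled overall, minus some excluded prefixes.
type versioningConfig struct {
	enabled          bool
	excludedPrefixes []string
}

// PrefixEnabled mirrors the per-object decision the handler now makes
// instead of reading one bucket-wide Versioned flag.
func (v versioningConfig) PrefixEnabled(object string) bool {
	if !v.enabled {
		return false
	}
	for _, p := range v.excludedPrefixes {
		if strings.HasPrefix(object, p) {
			return false
		}
	}
	return true
}

func main() {
	vc := versioningConfig{enabled: true, excludedPrefixes: []string{"tmp/"}}
	fmt.Println(vc.PrefixEnabled("docs/a.txt"))  // true
	fmt.Println(vc.PrefixEnabled("tmp/scratch")) // false
}
```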
@@ -706,20 +706,53 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objectLockEnabled := false
|
||||
if vs, found := r.Header[http.CanonicalHeaderKey("x-amz-bucket-object-lock-enabled")]; found {
|
||||
v := strings.ToLower(strings.Join(vs, ""))
|
||||
if v != "true" && v != "false" {
|
||||
if vs := r.Header.Get(xhttp.AmzObjectLockEnabled); len(vs) > 0 {
|
||||
v := strings.ToLower(vs)
|
||||
switch v {
|
||||
case "true", "false":
|
||||
objectLockEnabled = v == "true"
|
||||
default:
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
objectLockEnabled = v == "true"
|
||||
}
|
||||
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.CreateBucketAction, bucket, ""); s3Error != ErrNone {
|
||||
forceCreate := false
|
||||
if vs := r.Header.Get(xhttp.MinIOForceCreate); len(vs) > 0 {
|
||||
v := strings.ToLower(vs)
|
||||
switch v {
|
||||
case "true", "false":
|
||||
forceCreate = v == "true"
|
||||
default:
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
cred, owner, s3Error := checkRequestAuthTypeCredential(ctx, r, policy.CreateBucketAction)
|
||||
if s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if objectLockEnabled {
|
||||
// Creating a bucket with locking requires the user having more permissions
|
||||
for _, action := range []iampolicy.Action{iampolicy.PutBucketObjectLockConfigurationAction, iampolicy.PutBucketVersioningAction} {
|
||||
if !globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Groups: cred.Groups,
|
||||
Action: action,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
|
||||
BucketName: bucket,
|
||||
IsOwner: owner,
|
||||
Claims: cred.Claims,
|
||||
}) {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Parse incoming location constraint.
|
||||
location, s3Error := parseLocationConstraint(r)
|
||||
if s3Error != ErrNone {
|
||||
@@ -734,9 +767,14 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
|
||||
return
|
||||
}
|
||||
|
||||
opts := BucketOptions{
|
||||
Location: location,
|
||||
// check if client is attempting to create more buckets, complain about it.
|
||||
if currBuckets := globalBucketMetadataSys.Count(); currBuckets+1 > maxBuckets {
|
||||
logger.LogIf(ctx, fmt.Errorf("An attempt to create %d buckets beyond recommended %d", currBuckets+1, maxBuckets))
|
||||
}
|
||||
|
||||
opts := MakeBucketOptions{
|
||||
LockEnabled: objectLockEnabled,
|
||||
ForceCreate: forceCreate,
|
||||
}
|
||||
|
||||
if globalDNSConfig != nil {
|
||||
@@ -746,13 +784,16 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
|
||||
// exists elsewhere
|
||||
if err == dns.ErrNoEntriesFound || err == dns.ErrNotImplemented {
|
||||
// Proceed to creating a bucket.
|
||||
if err = objectAPI.MakeBucketWithLocation(ctx, bucket, opts); err != nil {
|
||||
if err = objectAPI.MakeBucket(ctx, bucket, opts); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = globalDNSConfig.Put(bucket); err != nil {
|
||||
objectAPI.DeleteBucket(context.Background(), bucket, DeleteBucketOptions{Force: false, NoRecreate: true})
|
||||
objectAPI.DeleteBucket(context.Background(), bucket, DeleteBucketOptions{
|
||||
Force: true,
|
||||
SRDeleteOp: getSRBucketDeleteOp(globalSiteReplicationSys.isEnabled()),
|
||||
})
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -794,19 +835,14 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
|
||||
}
|
||||
|
||||
// Proceed to creating a bucket.
|
||||
err := objectAPI.MakeBucketWithLocation(ctx, bucket, opts)
|
||||
if _, ok := err.(BucketExists); ok {
|
||||
// Though bucket exists locally, we send the site-replication
|
||||
// hook to ensure all sites have this bucket. If the hook
|
||||
// succeeds, the client will still receive a bucket exists
|
||||
// message.
|
||||
err2 := globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts)
|
||||
if err2 != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
if err := objectAPI.MakeBucket(ctx, bucket, opts); err != nil {
|
||||
if _, ok := err.(BucketExists); ok {
|
||||
// Though bucket exists locally, we send the site-replication
|
||||
// hook to ensure all sites have this bucket. If the hook
|
||||
// succeeds, the client will still receive a bucket exists
|
||||
// message.
|
||||
globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -815,11 +851,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
|
||||
globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)
|
||||
|
||||
// Call site replication hook
|
||||
err = globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts)
|
||||
|
||||
// Make sure to add Location information here only for bucket
|
||||
if cp := pathClean(r.URL.Path); cp != "" {
|
||||
@@ -858,7 +890,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
||||
return
|
||||
}
|
||||
|
||||
if _, ok := crypto.IsRequested(r.Header); !objectAPI.IsEncryptionSupported() && ok {
|
||||
if crypto.Requested(r.Header) && !objectAPI.IsEncryptionSupported() {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
@@ -888,16 +920,18 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// be loaded in memory, the remaining being put in temporary files.
reader, err := r.MultipartReader()
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}

// Read multipart data and save in memory and in the disk if needed
form, err := reader.ReadForm(maxFormMemory)
if err != nil {
logger.LogIf(ctx, err, logger.Application)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}

@@ -907,8 +941,9 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// Extract all form fields
fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(ctx, form)
if err != nil {
logger.LogIf(ctx, err, logger.Application)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}
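All three multipart failure paths now attach the underlying parse error to the otherwise generic ErrMalformedPOSTRequest, so clients see the actual cause. If this pattern keeps recurring it could be hoisted into a tiny helper; a hypothetical sketch (errWithDescription is not part of this change, purely an illustration):

    // errWithDescription is hypothetical; it captures the repeated pattern above.
    func errWithDescription(code APIErrorCode, err error) APIError {
        apiErr := errorCodes.ToAPIErr(code)
        apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
        return apiErr
    }

    // Usage, matching the hunks above:
    // writeErrorResponse(ctx, w, errWithDescription(ErrMalformedPOSTRequest, err), r.URL)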

@@ -1023,10 +1058,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
sseConfig, _ := globalBucketSSEConfigSys.Get(bucket)
sseConfig.Apply(r.Header, sse.ApplyOptions{
AutoEncrypt: globalAutoEncryption,
Passthrough: globalIsGateway && globalGatewayName == S3BackendGateway,
})

// get gateway encryption options
var opts ObjectOptions
opts, err = putOpts(ctx, r, bucket, object, metadata)
if err != nil {
@@ -1034,7 +1067,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
return
}
if objectAPI.IsEncryptionSupported() {
if _, ok := crypto.IsRequested(formValues); ok && !HasSuffix(object, SlashSeparator) { // handle SSE requests
if crypto.Requested(formValues) && !HasSuffix(object, SlashSeparator) { // handle SSE requests
if crypto.SSECopy.IsRequested(r.Header) {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
return
@@ -1060,7 +1093,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
return
}
}
reader, objectEncryptionKey, err = newEncryptReader(hashReader, kind, keyID, key, bucket, object, metadata, kmsCtx)
reader, objectEncryptionKey, err = newEncryptReader(ctx, hashReader, kind, keyID, key, bucket, object, metadata, kmsCtx)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -1109,9 +1142,12 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
Host: handlers.GetSourceIP(r),
})

if successRedirect != "" {
// Replace raw query params..
redirectURL.RawQuery = getRedirectPostRawQuery(objInfo)
if redirectURL != nil { // success_action_redirect is valid and set.
v := redirectURL.Query()
v.Add("bucket", objInfo.Bucket)
v.Add("key", objInfo.Name)
v.Add("etag", "\""+objInfo.ETag+"\"")
redirectURL.RawQuery = v.Encode()
writeRedirectSeeOther(w, redirectURL.String())
return
}
@@ -1155,7 +1191,7 @@ func (api objectAPIHandlers) GetBucketPolicyStatusHandler(w http.ResponseWriter,
}

// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1218,7 +1254,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re

getBucketInfo := objectAPI.GetBucketInfo

if _, err := getBucketInfo(ctx, bucket); err != nil {
if _, err := getBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
@@ -1295,7 +1331,10 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
deleteBucket := objectAPI.DeleteBucket

// Attempt to delete bucket.
if err := deleteBucket(ctx, bucket, DeleteBucketOptions{Force: forceDelete}); err != nil {
if err := deleteBucket(ctx, bucket, DeleteBucketOptions{
Force: forceDelete,
SRDeleteOp: getSRBucketDeleteOp(globalSiteReplicationSys.isEnabled()),
}); err != nil {
apiErr := toAPIError(ctx, err)
if _, ok := err.(BucketNotEmpty); ok {
if globalBucketVersioningSys.Enabled(bucket) || globalBucketVersioningSys.Suspended(bucket) {
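Stepping back from this hunk: bucket deletion now always carries an SRDeleteOp, so peer sites learn whether the delete should fan out. A sketch of the options as the diff uses them (field set assumed from the call sites shown, not the full struct definition):

    // Assumed from the call sites in this diff.
    opts := DeleteBucketOptions{
        Force:      forceDelete, // honor the client's force-delete request
        SRDeleteOp: getSRBucketDeleteOp(globalSiteReplicationSys.isEnabled()),
    }
    if err := objectAPI.DeleteBucket(ctx, bucket, opts); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }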
@@ -1350,10 +1389,6 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketObjectLockConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
@@ -1374,12 +1409,13 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
}

// Deny object locking configuration settings on existing buckets without object lock enabled.
if _, err = globalBucketMetadataSys.GetObjectLockConfig(bucket); err != nil {
if _, _, err = globalBucketMetadataSys.GetObjectLockConfig(bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if err = globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, configData); err != nil {
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, configData)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
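Update now reports when the metadata write happened, and the handler threads that timestamp into the site-replication hook in the next hunk. A compact sketch of the new calling convention:

    // Sketch: Update returns the modification time used for convergence checks.
    updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, configData)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    // updatedAt is then carried in madmin.SRBucketMeta.UpdatedAt (below).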
@@ -1393,6 +1429,7 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
Type: madmin.SRBucketMetaTypeObjectLockConfig,
Bucket: bucket,
ObjectLockConfig: &cfgStr,
UpdatedAt: updatedAt,
}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -1427,7 +1464,7 @@ func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWri
return
}

config, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)
config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -1460,7 +1497,7 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
}

// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1484,7 +1521,8 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
return
}

if err = globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, configData); err != nil {
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, configData)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1495,9 +1533,10 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeTags,
Bucket: bucket,
Tags: &cfgStr,
Type: madmin.SRBucketMetaTypeTags,
Bucket: bucket,
Tags: &cfgStr,
UpdatedAt: updatedAt,
}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -1529,7 +1568,7 @@ func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *h
return
}

config, err := globalBucketMetadataSys.GetTaggingConfig(bucket)
config, _, err := globalBucketMetadataSys.GetTaggingConfig(bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -1566,14 +1605,16 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r
return
}

if err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, nil); err != nil {
updatedAt, err := globalBucketMetadataSys.Delete(ctx, bucket, bucketTaggingConfig)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
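Deleting a config is no longer spelled Update(..., nil); the dedicated Delete entry point makes intent explicit and keeps the same updatedAt contract for the SR hook. Sketch:

    // Before: globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, nil)
    // After: deletion is its own operation, still returning the timestamp.
    updatedAt, err := globalBucketMetadataSys.Delete(ctx, bucket, bucketTaggingConfig)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    _ = updatedAt // forwarded to the site-replication BucketMetaHook below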

if err := globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeTags,
Bucket: bucket,
Type: madmin.SRBucketMetaTypeTags,
Bucket: bucket,
UpdatedAt: updatedAt,
}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -1582,375 +1623,3 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r
// Write success response.
writeSuccessResponseHeadersOnly(w)
}

// PutBucketReplicationConfigHandler - PUT Bucket replication configuration.
// ----------
// Add a replication configuration on the specified bucket as specified in https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html
func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketReplicationConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if !globalIsErasure {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if globalSiteReplicationSys.isEnabled() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationDenyEditError), r.URL)
return
}
if versioned := globalBucketVersioningSys.Enabled(bucket); !versioned {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationNeedsVersioningError), r.URL)
return
}
replicationConfig, err := replication.ParseConfig(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
apiErr.Description = err.Error()
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}
sameTarget, apiErr := validateReplicationDestination(ctx, bucket, replicationConfig)
if apiErr != noError {
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}
// Validate the received bucket replication config
if err = replicationConfig.Validate(bucket, sameTarget); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
configData, err := xml.Marshal(replicationConfig)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err = globalBucketMetadataSys.Update(ctx, bucket, bucketReplicationConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

// Write success response.
writeSuccessResponseHeadersOnly(w)
}

// GetBucketReplicationConfigHandler - GET Bucket replication configuration.
// ----------
// Gets the replication configuration for a bucket.
func (api objectAPIHandlers) GetBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketReplicationConfig")

defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

// check if user has permissions to perform this operation
if s3Error := checkRequestAuthType(ctx, r, policy.GetReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

config, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

// Write success response.
writeSuccessResponseXML(w, configData)
}

// DeleteBucketReplicationConfigHandler - DELETE Bucket replication config.
// ----------
func (api objectAPIHandlers) DeleteBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketReplicationConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.PutReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if globalSiteReplicationSys.isEnabled() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationDenyEditError), r.URL)
return
}
if err := globalBucketMetadataSys.Update(ctx, bucket, bucketReplicationConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

// Write success response.
writeSuccessResponseHeadersOnly(w)
}

// GetBucketReplicationMetricsHandler - GET Bucket replication metrics.
// ----------
// Gets the replication metrics for a bucket.
func (api objectAPIHandlers) GetBucketReplicationMetricsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketReplicationMetrics")

defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

// check if user has permissions to perform this operation
if s3Error := checkRequestAuthType(ctx, r, policy.GetReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}

// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
var usageInfo BucketUsageInfo
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err == nil && !dataUsageInfo.LastUpdate.IsZero() {
usageInfo = dataUsageInfo.BucketsUsage[bucket]
}

w.Header().Set(xhttp.ContentType, string(mimeJSON))

enc := json.NewEncoder(w)
if err = enc.Encode(getLatestReplicationStats(bucket, usageInfo)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}

// ResetBucketReplicationStartHandler - starts a replication reset for all objects in a bucket which
// qualify for replication and re-sync the object(s) to target, provided ExistingObjectReplication is
// enabled for the qualifying rule. This API is a MinIO only extension provided for situations where
// remote target is entirely lost, and previously replicated objects need to be re-synced. If resync is
// already in progress it returns an error.
func (api objectAPIHandlers) ResetBucketReplicationStartHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ResetBucketReplicationStart")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
durationStr := r.URL.Query().Get("older-than")
arn := r.URL.Query().Get("arn")
resetID := r.URL.Query().Get("reset-id")
if resetID == "" {
resetID = mustGetUUID()
}
var (
days time.Duration
err error
)
if durationStr != "" {
days, err = time.ParseDuration(durationStr)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, InvalidArgument{
Bucket: bucket,
Err: fmt.Errorf("invalid query parameter older-than %s for %s : %w", durationStr, bucket, err),
}), r.URL)
}
}
resetBeforeDate := UTCNow().AddDate(0, 0, -1*int(days/24))

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.ResetBucketReplicationStateAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}

// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

config, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if !config.HasActiveRules("", true) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationNoMatchingRuleError), r.URL)
return
}
tgtArns := config.FilterTargetArns(
replication.ObjectOpts{
OpType: replication.ResyncReplicationType,
TargetArn: arn,
})

if len(tgtArns) == 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: fmt.Errorf("Remote target ARN %s missing or ineligible for replication resync", arn),
}), r.URL)
return
}

if len(tgtArns) > 1 && arn == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: fmt.Errorf("ARN should be specified for replication reset"),
}), r.URL)
return
}
var rinfo ResyncTargetsInfo
target := globalBucketTargetSys.GetRemoteBucketTargetByArn(ctx, bucket, tgtArns[0])
target.ResetBeforeDate = UTCNow().AddDate(0, 0, -1*int(days/24))
target.ResetID = resetID
rinfo.Targets = append(rinfo.Targets, ResyncTarget{Arn: tgtArns[0], ResetID: target.ResetID})
if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, true); err != nil {
switch err.(type) {
case BucketRemoteConnectionErr:
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationRemoteConnectionError, err), r.URL)
default:
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
}
}
if err := startReplicationResync(ctx, bucket, arn, resetID, resetBeforeDate, objectAPI); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: err,
}), r.URL)
return
}

data, err := json.Marshal(rinfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, data)
}

// ResetBucketReplicationStatusHandler - returns the status of replication reset.
// This API is a MinIO only extension
func (api objectAPIHandlers) ResetBucketReplicationStatusHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ResetBucketReplicationStatus")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

vars := mux.Vars(r)
bucket := vars["bucket"]
arn := r.URL.Query().Get("arn")
var err error

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.ResetBucketReplicationStateAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}

// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

globalReplicationPool.resyncState.RLock()
brs, ok := globalReplicationPool.resyncState.statusMap[bucket]
if !ok {
brs, err = loadBucketResyncMetadata(ctx, bucket, objectAPI)
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: fmt.Errorf("No replication resync status available for %s", arn),
}), r.URL)
}
return
}

var rinfo ResyncTargetsInfo
for tarn, st := range brs.TargetsMap {
if arn != "" && tarn != arn {
continue
}
rinfo.Targets = append(rinfo.Targets, ResyncTarget{
Arn: tarn,
ResetID: st.ResyncID,
StartTime: st.StartTime,
EndTime: st.EndTime,
ResyncStatus: st.ResyncStatus.String(),
ReplicatedSize: st.ReplicatedSize,
ReplicatedCount: st.ReplicatedCount,
FailedSize: st.FailedSize,
FailedCount: st.FailedCount,
Bucket: st.Bucket,
Object: st.Object,
})
}
globalReplicationPool.resyncState.RUnlock()
data, err := json.Marshal(rinfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, data)
}

@@ -21,7 +21,7 @@ import (
"bytes"
"encoding/xml"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/http/httptest"
"strconv"
@@ -36,7 +36,8 @@ func TestRemoveBucketHandler(t *testing.T) {
}

func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
credentials auth.Credentials, t *testing.T,
) {
_, err := obj.PutObject(GlobalContext, bucketName, "test-object", mustGetPutObjReader(t, bytes.NewReader([]byte{}), int64(0), "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), ObjectOptions{})
// if object upload fails stop the test.
if err != nil {
@@ -884,7 +885,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
}

// read the response body.
actualContent, err = ioutil.ReadAll(rec.Body)
actualContent, err = io.ReadAll(rec.Body)
if err != nil {
t.Fatalf("Test %d : MinIO %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}
@@ -62,12 +62,12 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
}

// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

bucketLifecycle, err := lifecycle.ParseLifecycleConfig(io.LimitReader(r.Body, r.ContentLength))
bucketLifecycle, err := lifecycle.ParseLifecycleConfigWithID(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -91,7 +91,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
return
}

if err = globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, configData); err != nil {
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -121,7 +121,7 @@ func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r
}

// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -163,12 +163,12 @@ func (api objectAPIHandlers) DeleteBucketLifecycleHandler(w http.ResponseWriter,
}

// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if err := globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, nil); err != nil {
if _, err := globalBucketMetadataSys.Delete(ctx, bucket, bucketLifecycleConfig); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -34,7 +34,8 @@ func TestBucketLifecycleWrongCredentials(t *testing.T) {

// Test for authentication
func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
credentials auth.Credentials, t *testing.T,
) {
// test cases with sample input and expected output.
testCases := []struct {
method string

@@ -31,6 +31,7 @@ import (

"github.com/google/uuid"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/amztime"
sse "github.com/minio/minio/internal/bucket/encryption"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/event"
@@ -57,15 +58,6 @@ type LifecycleSys struct{}

// Get - gets lifecycle config associated to a given bucket name.
func (sys *LifecycleSys) Get(bucketName string) (lc *lifecycle.Lifecycle, err error) {
if globalIsGateway {
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}

return nil, BucketLifecycleNotFound{Bucket: bucketName}
}

return globalBucketMetadataSys.GetLifecycleConfig(bucketName)
}
@@ -154,9 +146,14 @@ type newerNoncurrentTask struct {
versions []ObjectToDelete
}

type transitionTask struct {
tier string
objInfo ObjectInfo
}

type transitionState struct {
once sync.Once
transitionCh chan ObjectInfo
transitionCh chan transitionTask

ctx context.Context
objAPI ObjectLayer
@@ -170,13 +167,13 @@ type transitionState struct {
lastDayStats map[string]*lastDayTierStats
}

func (t *transitionState) queueTransitionTask(oi ObjectInfo) {
func (t *transitionState) queueTransitionTask(oi ObjectInfo, sc string) {
select {
case <-GlobalContext.Done():
t.once.Do(func() {
close(t.transitionCh)
})
case t.transitionCh <- oi:
case t.transitionCh <- transitionTask{objInfo: oi, tier: sc}:
default:
}
}
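Note the default arm: queueTransitionTask is a non-blocking send, so a full channel drops the task instead of stalling the caller (the scanner picks the object up again on a later cycle). The same pattern in isolation, as a self-contained illustration:

    package main

    import "fmt"

    // tryEnqueue attempts a send without ever blocking the producer.
    func tryEnqueue(ch chan int, v int) bool {
        select {
        case ch <- v:
            return true
        default: // queue full: drop rather than block
            return false
        }
    }

    func main() {
        ch := make(chan int, 1)
        fmt.Println(tryEnqueue(ch, 1)) // true
        fmt.Println(tryEnqueue(ch, 2)) // false: buffer already full
    }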
@@ -185,7 +182,7 @@ var globalTransitionState *transitionState

func newTransitionState(ctx context.Context, objAPI ObjectLayer) *transitionState {
return &transitionState{
transitionCh: make(chan ObjectInfo, 10000),
transitionCh: make(chan transitionTask, 10000),
ctx: ctx,
objAPI: objAPI,
killCh: make(chan struct{}),
@@ -212,27 +209,25 @@ func (t *transitionState) worker(ctx context.Context, objectAPI ObjectLayer) {
return
case <-ctx.Done():
return
case oi, ok := <-t.transitionCh:
case task, ok := <-t.transitionCh:
if !ok {
return
}
atomic.AddInt32(&t.activeTasks, 1)
var tier string
var err error
if tier, err = transitionObject(ctx, objectAPI, oi); err != nil {
logger.LogIf(ctx, fmt.Errorf("Transition failed for %s/%s version:%s with %w", oi.Bucket, oi.Name, oi.VersionID, err))
if err := transitionObject(ctx, objectAPI, task.objInfo, task.tier); err != nil {
logger.LogIf(ctx, fmt.Errorf("Transition failed for %s/%s version:%s with %w",
task.objInfo.Bucket, task.objInfo.Name, task.objInfo.VersionID, err))
} else {
ts := tierStats{
TotalSize: uint64(oi.Size),
TotalSize: uint64(task.objInfo.Size),
NumVersions: 1,
}
if oi.IsLatest {
if task.objInfo.IsLatest {
ts.NumObjects = 1
}
t.addLastDayStats(tier, ts)
t.addLastDayStats(task.tier, ts)
}
atomic.AddInt32(&t.activeTasks, -1)

}
}
}
@@ -247,11 +242,11 @@ func (t *transitionState) addLastDayStats(tier string, ts tierStats) {
t.lastDayStats[tier].addStats(ts)
}

func (t *transitionState) getDailyAllTierStats() dailyAllTierStats {
func (t *transitionState) getDailyAllTierStats() DailyAllTierStats {
t.lastDayMu.RLock()
defer t.lastDayMu.RUnlock()

res := make(dailyAllTierStats, len(t.lastDayStats))
res := make(DailyAllTierStats, len(t.lastDayStats))
for tier, st := range t.lastDayStats {
res[tier] = st.clone()
}
@@ -303,9 +298,10 @@ func validateTransitionTier(lc *lifecycle.Lifecycle) error {
// This is to be called after a successful upload of an object (version).
func enqueueTransitionImmediate(obj ObjectInfo) {
if lc, err := globalLifecycleSys.Get(obj.Bucket); err == nil {
switch lc.ComputeAction(obj.ToLifecycleOpts()) {
event := lc.Eval(obj.ToLifecycleOpts())
switch event.Action {
case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
globalTransitionState.queueTransitionTask(obj)
globalTransitionState.queueTransitionTask(obj, event.StorageClass)
}
}
}
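This is the heart of the transitionTask change: lc.Eval resolves the lifecycle rule once, and the resulting event already names the destination storage class, so the worker no longer re-reads the lifecycle config to find the tier. A sketch of the producer side, with the event field names assumed from what this hunk shows:

    // Sketch; event.Action and event.StorageClass as the hunk implies.
    event := lc.Eval(obj.ToLifecycleOpts())
    switch event.Action {
    case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
        // The tier travels with the task; see transitionTask above.
        globalTransitionState.queueTransitionTask(obj, event.StorageClass)
    }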
@@ -330,7 +326,7 @@ const (
// 2. when a transitioned object expires (based on an ILM rule).
func expireTransitionedObject(ctx context.Context, objectAPI ObjectLayer, oi *ObjectInfo, lcOpts lifecycle.ObjectOpts, action expireAction) error {
var opts ObjectOptions
opts.Versioned = globalBucketVersioningSys.Enabled(oi.Bucket)
opts.Versioned = globalBucketVersioningSys.PrefixEnabled(oi.Bucket, oi.Name)
opts.VersionID = lcOpts.VersionID
opts.Expiration = ExpirationOptions{Expire: true}
switch action {
@@ -402,12 +398,7 @@ func genTransitionObjName(bucket string) (string, error) {
// storage specified by the transition ARN, the metadata is left behind on source cluster and original content
// is moved to the transition tier. Note that in the case of encrypted objects, entire encrypted stream is moved
// to the transition tier without decrypting or re-encrypting.
func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo) (string, error) {
lc, err := globalLifecycleSys.Get(oi.Bucket)
if err != nil {
return "", err
}
tier := lc.TransitionTier(oi.ToLifecycleOpts())
func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo, tier string) error {
opts := ObjectOptions{
Transition: TransitionOptions{
Status: lifecycle.TransitionPending,
@@ -415,11 +406,41 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
ETag: oi.ETag,
},
VersionID: oi.VersionID,
Versioned: globalBucketVersioningSys.Enabled(oi.Bucket),
VersionSuspended: globalBucketVersioningSys.Suspended(oi.Bucket),
Versioned: globalBucketVersioningSys.PrefixEnabled(oi.Bucket, oi.Name),
VersionSuspended: globalBucketVersioningSys.PrefixSuspended(oi.Bucket, oi.Name),
MTime: oi.ModTime,
}
return tier, objectAPI.TransitionObject(ctx, oi.Bucket, oi.Name, opts)
return objectAPI.TransitionObject(ctx, oi.Bucket, oi.Name, opts)
}

type auditTierOp struct {
Tier string `json:"tier"`
TimeToResponseNS int64 `json:"timeToResponseNS"`
OutputBytes int64 `json:"tx,omitempty"`
Error string `json:"error,omitempty"`
}

func auditTierActions(ctx context.Context, tier string, bytes int64) func(err error) {
startTime := time.Now()
return func(err error) {
// Record only when audit targets configured.
if len(logger.AuditTargets()) == 0 {
return
}

op := auditTierOp{
Tier: tier,
OutputBytes: bytes,
}

if err == nil {
op.TimeToResponseNS = time.Since(startTime).Nanoseconds()
} else {
op.Error = err.Error()
}

logger.GetReqInfo(ctx).AppendTags("tierStats", op)
}
}
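auditTierActions returns a closure that is armed at call time and fired with the terminal error, which keeps the timing bookkeeping out of the happy path. Usage, as the next hunk applies it to the remote-tier reader:

    // Start the clock before the remote tier call...
    done := auditTierActions(ctx, oi.TransitionedObject.Tier, length)
    // ...and report once the reader is closed, error included.
    closer := func() {
        done(reader.Close())
    }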

// getTransitionedObjectReader returns a reader from the transitioned tier.
@@ -441,12 +462,13 @@ func getTransitionedObjectReader(ctx context.Context, bucket, object string, rs
gopts.length = length
}

timeTierAction := auditTierActions(ctx, oi.TransitionedObject.Tier, length)
reader, err := tgtClient.Get(ctx, oi.TransitionedObject.Name, remoteVersionID(oi.TransitionedObject.VersionID), gopts)
if err != nil {
return nil, err
}
closer := func() {
reader.Close()
timeTierAction(reader.Close())
}
return fn(reader, h, closer)
}
@@ -518,7 +540,7 @@ type RestoreObjectRequest struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RestoreRequest" json:"-"`
Days int `xml:"Days,omitempty"`
Type RestoreRequestType `xml:"Type,omitempty"`
Tier string `xml:"Tier,-"`
Tier string `xml:"Tier"`
Description string `xml:"Description,omitempty"`
SelectParameters *SelectParameters `xml:"SelectParameters,omitempty"`
OutputLocation OutputLocation `xml:"OutputLocation,omitempty"`
@@ -560,7 +582,7 @@ func (r *RestoreObjectRequest) validate(ctx context.Context, objAPI ObjectLayer)
}
// Check if bucket exists.
if !r.OutputLocation.IsEmpty() {
if _, err := objAPI.GetBucketInfo(ctx, r.OutputLocation.S3.BucketName); err != nil {
if _, err := objAPI.GetBucketInfo(ctx, r.OutputLocation.S3.BucketName, BucketOptions{}); err != nil {
return err
}
if r.OutputLocation.S3.Prefix == "" {
@@ -575,8 +597,8 @@ func (r *RestoreObjectRequest) validate(ctx context.Context, objAPI ObjectLayer)

// postRestoreOpts returns ObjectOptions with version-id from the POST restore object request for a given bucket and object.
func postRestoreOpts(ctx context.Context, r *http.Request, bucket, object string) (opts ObjectOptions, err error) {
versioned := globalBucketVersioningSys.Enabled(bucket)
versionSuspended := globalBucketVersioningSys.Suspended(bucket)
versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object)
versionSuspended := globalBucketVersioningSys.PrefixSuspended(bucket, object)
vid := strings.TrimSpace(r.Form.Get(xhttp.VersionID))
if vid != "" && vid != nullVersionID {
_, err := uuid.Parse(vid)
@@ -627,8 +649,8 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O
meta[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionAES
}
return ObjectOptions{
Versioned: globalBucketVersioningSys.Enabled(bucket),
VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
Versioned: globalBucketVersioningSys.PrefixEnabled(bucket, object),
VersionSuspended: globalBucketVersioningSys.PrefixSuspended(bucket, object),
UserDefined: meta,
}
}
@@ -640,8 +662,8 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O
}

return ObjectOptions{
Versioned: globalBucketVersioningSys.Enabled(bucket),
VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
Versioned: globalBucketVersioningSys.PrefixEnabled(bucket, object),
VersionSuspended: globalBucketVersioningSys.PrefixSuspended(bucket, object),
UserDefined: meta,
VersionID: objInfo.VersionID,
MTime: objInfo.ModTime,
@@ -755,7 +777,7 @@ func parseRestoreObjStatus(restoreHdr string) (restoreObjStatus, error) {
if strings.TrimSpace(expiryTokens[0]) != "expiry-date" {
return restoreObjStatus{}, errRestoreHDRMalformed
}
expiry, err := time.Parse(http.TimeFormat, strings.Trim(expiryTokens[1], `"`))
expiry, err := amztime.ParseHeader(strings.Trim(expiryTokens[1], `"`))
if err != nil {
return restoreObjStatus{}, errRestoreHDRMalformed
}
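Swapping time.Parse(http.TimeFormat, ...) for amztime.ParseHeader makes the restore-status parser tolerant of more than one date layout, rather than only RFC 1123 with GMT. A hedged sketch of the difference (the exact layout list is internal to amztime and assumed here):

    // Strict: accepts only "Mon, 02 Jan 2006 15:04:05 GMT".
    expiry, err := time.Parse(http.TimeFormat, raw)

    // Lenient: amztime.ParseHeader is assumed to try the common
    // S3/HTTP header time layouts before giving up.
    expiry, err = amztime.ParseHeader(raw)
    _ = expiry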

@@ -203,7 +203,7 @@ func TestObjectIsRemote(t *testing.T) {
if got := fi.IsRemote(); got != tc.remote {
t.Fatalf("Test %d.a: expected %v got %v", i+1, tc.remote, got)
}
oi := fi.ToObjectInfo("bucket", "object")
oi := fi.ToObjectInfo("bucket", "object", false)
if got := oi.IsRemote(); got != tc.remote {
t.Fatalf("Test %d.b: expected %v got %v", i+1, tc.remote, got)
}

@@ -26,33 +26,16 @@ import (
"github.com/gorilla/mux"
"github.com/minio/minio/internal/logger"

"github.com/minio/minio/internal/sync/errgroup"
"github.com/minio/pkg/bucket/policy"
)

func concurrentDecryptETag(ctx context.Context, objects []ObjectInfo) {
g := errgroup.WithNErrs(len(objects)).WithConcurrency(500)
for index := range objects {
index := index
g.Go(func() error {
size, err := objects[index].GetActualSize()
if err == nil {
objects[index].Size = size
}
objects[index].ETag = objects[index].GetActualETag(nil)
return nil
}, index)
}
g.Wait()
}

// Validate all the ListObjects query arguments, returns an APIErrorCode
// if one of the args does not meet the required conditions.
// Special conditions required by MinIO server are as below
// - delimiter if set should be equal to '/', otherwise the request is rejected.
// - marker if set should have a common prefix with 'prefix' param, otherwise
// the request is rejected.
func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int) APIErrorCode {
// - delimiter if set should be equal to '/', otherwise the request is rejected.
// - marker if set should have a common prefix with 'prefix' param, otherwise
// the request is rejected.
func validateListObjectsArgs(prefix, marker, delimiter, encodingType string, maxKeys int) APIErrorCode {
// Max keys cannot be negative.
if maxKeys < 0 {
return ErrInvalidMaxKeys
@@ -65,6 +48,14 @@ func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int
}
}

if !IsValidObjectPrefix(prefix) {
return ErrInvalidObjectName
}

if marker != "" && !HasPrefix(marker, prefix) {
return ErrNotImplemented
}

return ErrNone
}
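With prefix now part of validation, a malformed prefix is rejected before any listing work starts, and a marker that does not fall under the prefix is refused up front. The new call shape, shared by all four list handlers that follow:

    // prefix is validated first; marker must then share it.
    if s3Error := validateListObjectsArgs(prefix, marker, delimiter, encodingType, maxKeys); s3Error != ErrNone {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
        return
    }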

@@ -100,7 +91,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
}

// Validate the query params before beginning to serve the request.
if s3Error := validateListObjectsArgs(marker, delimiter, encodingType, maxkeys); s3Error != ErrNone {
if s3Error := validateListObjectsArgs(prefix, marker, delimiter, encodingType, maxkeys); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
@@ -116,7 +107,10 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
return
}

concurrentDecryptETag(ctx, listObjectVersionsInfo.Objects)
if err = DecryptETags(ctx, GlobalKMS, listObjectVersionsInfo.Objects); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo)
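The per-request errgroup in concurrentDecryptETag gives way to a shared DecryptETags helper that can fail loudly instead of silently leaving encrypted ETags in the response; the same three-line replacement recurs in the V2M, V2, and V1 handlers below. Call shape, as these hunks use it:

    // Decrypt the listed objects' ETags via the KMS; abort the listing on failure.
    if err = DecryptETags(ctx, GlobalKMS, listObjectVersionsInfo.Objects); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }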

@@ -162,7 +156,7 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt

// Validate the query params before beginning to serve the request.
// fetch-owner is not validated since it is a boolean
if s3Error := validateListObjectsArgs(token, delimiter, encodingType, maxKeys); s3Error != ErrNone {
if s3Error := validateListObjectsArgs(prefix, token, delimiter, encodingType, maxKeys); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
@@ -178,7 +172,10 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
return
}

concurrentDecryptETag(ctx, listObjectsV2Info.Objects)
if err = DecryptETags(ctx, GlobalKMS, listObjectsV2Info.Objects); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

// The next continuation token has id@node_index format to optimize paginated listing
nextContinuationToken := listObjectsV2Info.NextContinuationToken
@@ -229,7 +226,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http

// Validate the query params before beginning to serve the request.
// fetch-owner is not validated since it is a boolean
if s3Error := validateListObjectsArgs(token, delimiter, encodingType, maxKeys); s3Error != ErrNone {
if s3Error := validateListObjectsArgs(prefix, token, delimiter, encodingType, maxKeys); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
@@ -253,7 +250,10 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
return
}

concurrentDecryptETag(ctx, listObjectsV2Info.Objects)
if err = DecryptETags(ctx, GlobalKMS, listObjectsV2Info.Objects); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
@@ -306,7 +306,6 @@ func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http
// This implementation of the GET operation returns some or all (up to 1000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectsV1")

@@ -334,7 +333,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
}

// Validate all the query params before beginning to serve the request.
if s3Error := validateListObjectsArgs(marker, delimiter, encodingType, maxKeys); s3Error != ErrNone {
if s3Error := validateListObjectsArgs(prefix, marker, delimiter, encodingType, maxKeys); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
@@ -350,7 +349,10 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
return
}

concurrentDecryptETag(ctx, listObjectsInfo.Objects)
if err = DecryptETags(ctx, GlobalKMS, listObjectsInfo.Objects); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)

@@ -18,13 +18,13 @@
package cmd

import (
"bytes"
"context"
"errors"
"fmt"
"sync"
"time"

"github.com/minio/madmin-go"
"github.com/minio/madmin-go/v2"
"github.com/minio/minio-go/v7/pkg/tags"
bucketsse "github.com/minio/minio/internal/bucket/encryption"
"github.com/minio/minio/internal/bucket/lifecycle"
@@ -44,11 +44,16 @@ type BucketMetadataSys struct {
metadataMap map[string]BucketMetadata
}

// Count returns number of bucket metadata map entries.
func (sys *BucketMetadataSys) Count() int {
sys.RLock()
defer sys.RUnlock()

return len(sys.metadataMap)
}

// Remove bucket metadata from memory.
func (sys *BucketMetadataSys) Remove(bucket string) {
if globalIsGateway {
return
}
sys.Lock()
delete(sys.metadataMap, bucket)
globalBucketMonitor.DeleteBucket(bucket)
@@ -61,101 +66,91 @@ func (sys *BucketMetadataSys) Remove(bucket string) {
// so they should be replaced atomically and not appended to, etc.
// Data is not persisted to disk.
func (sys *BucketMetadataSys) Set(bucket string, meta BucketMetadata) {
if globalIsGateway {
return
}

if bucket != minioMetaBucket {
if !isMinioMetaBucketName(bucket) {
sys.Lock()
sys.metadataMap[bucket] = meta
sys.Unlock()
}
}

// Update updates bucket metadata for the specified config file.
// The configData data should not be modified after being sent here.
func (sys *BucketMetadataSys) Update(ctx context.Context, bucket string, configFile string, configData []byte) error {
func (sys *BucketMetadataSys) updateAndParse(ctx context.Context, bucket string, configFile string, configData []byte, parse bool) (updatedAt time.Time, err error) {
objAPI := newObjectLayerFn()
if objAPI == nil {
return errServerNotInitialized
return updatedAt, errServerNotInitialized
}

if globalIsGateway && globalGatewayName != NASBackendGateway {
if configFile == bucketPolicyConfig {
if configData == nil {
return objAPI.DeleteBucketPolicy(ctx, bucket)
}
config, err := policy.ParseConfig(bytes.NewReader(configData), bucket)
if err != nil {
return err
}
return objAPI.SetBucketPolicy(ctx, bucket, config)
}
return NotImplemented{}
if isMinioMetaBucketName(bucket) {
return updatedAt, errInvalidArgument
}

if bucket == minioMetaBucket {
return errInvalidArgument
}

meta, err := loadBucketMetadata(ctx, objAPI, bucket)
meta, err := loadBucketMetadataParse(ctx, objAPI, bucket, parse)
if err != nil {
if !globalIsErasure && !globalIsDistErasure && errors.Is(err, errVolumeNotFound) {
// Only single drive mode needs this fallback.
meta = newBucketMetadata(bucket)
} else {
return err
return updatedAt, err
}
}

updatedAt = UTCNow()
switch configFile {
case bucketPolicyConfig:
meta.PolicyConfigJSON = configData
meta.PolicyConfigUpdatedAt = updatedAt
case bucketNotificationConfig:
meta.NotificationConfigXML = configData
case bucketLifecycleConfig:
meta.LifecycleConfigXML = configData
case bucketSSEConfig:
meta.EncryptionConfigXML = configData
meta.EncryptionConfigUpdatedAt = updatedAt
case bucketTaggingConfig:
meta.TaggingConfigXML = configData
meta.TaggingConfigUpdatedAt = updatedAt
case bucketQuotaConfigFile:
meta.QuotaConfigJSON = configData
meta.QuotaConfigUpdatedAt = updatedAt
case objectLockConfig:
if !globalIsErasure && !globalIsDistErasure {
return NotImplemented{}
}
meta.ObjectLockConfigXML = configData
meta.ObjectLockConfigUpdatedAt = updatedAt
case bucketVersioningConfig:
if !globalIsErasure && !globalIsDistErasure {
return NotImplemented{}
}
meta.VersioningConfigXML = configData
meta.VersioningConfigUpdatedAt = updatedAt
case bucketReplicationConfig:
if !globalIsErasure && !globalIsDistErasure {
return NotImplemented{}
}
meta.ReplicationConfigXML = configData
meta.ReplicationConfigUpdatedAt = updatedAt
case bucketTargetsFile:
meta.BucketTargetsConfigJSON, meta.BucketTargetsConfigMetaJSON, err = encryptBucketMetadata(meta.Name, configData, kms.Context{
meta.BucketTargetsConfigJSON, meta.BucketTargetsConfigMetaJSON, err = encryptBucketMetadata(ctx, meta.Name, configData, kms.Context{
bucket: meta.Name,
bucketTargetsFile: bucketTargetsFile,
})
if err != nil {
return fmt.Errorf("Error encrypting bucket target metadata %w", err)
return updatedAt, fmt.Errorf("Error encrypting bucket target metadata %w", err)
}
default:
return fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)
return updatedAt, fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)
}

if err := meta.Save(ctx, objAPI); err != nil {
return err
return updatedAt, err
}

sys.Set(bucket, meta)
globalNotificationSys.LoadBucketMetadata(bgContext(ctx), bucket) // Do not use caller context here

return nil
return updatedAt, nil
}

// Delete deletes the bucket metadata for the specified bucket.
// must be used by all callers instead of using Update() with nil configData.
func (sys *BucketMetadataSys) Delete(ctx context.Context, bucket string, configFile string) (updatedAt time.Time, err error) {
return sys.updateAndParse(ctx, bucket, configFile, nil, false)
}

// Update updates bucket metadata for the specified bucket.
// The configData data should not be modified after being sent here.
func (sys *BucketMetadataSys) Update(ctx context.Context, bucket string, configFile string, configData []byte) (updatedAt time.Time, err error) {
return sys.updateAndParse(ctx, bucket, configFile, configData, true)
}
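The whole series of handler changes above hangs off this refactor: both public entry points funnel into updateAndParse, which stamps UTCNow() once, persists the metadata, and hands the timestamp back so callers can feed site replication. A sketch of a typical caller under the new contract, grounded in the tagging handler earlier in this diff:

    updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, configData)
    if err != nil {
        return err
    }
    cfgStr := base64.StdEncoding.EncodeToString(configData)
    err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
        Type:      madmin.SRBucketMetaTypeTags,
        Bucket:    bucket,
        Tags:      &cfgStr,
        UpdatedAt: updatedAt, // lets peers converge on the newest config
    })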
|
||||
|
||||
// Get metadata for a bucket.
|
||||
@@ -169,7 +164,7 @@ func (sys *BucketMetadataSys) Update(ctx context.Context, bucket string, configF
|
||||
// For all other bucket specific metadata, use the relevant
|
||||
// calls implemented specifically for each of those features.
|
||||
func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) {
|
||||
if globalIsGateway || bucket == minioMetaBucket {
|
||||
if isMinioMetaBucketName(bucket) {
|
||||
return newBucketMetadata(bucket), errConfigNotFound
|
||||
}
|
||||
|
||||
@@ -186,65 +181,52 @@ func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) {
|
||||
|
||||
// GetVersioningConfig returns configured versioning config
|
||||
// The returned object may not be modified.
|
||||
func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Versioning, error) {
|
||||
func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Versioning, time.Time, error) {
|
||||
meta, err := sys.GetConfig(GlobalContext, bucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if errors.Is(err, errConfigNotFound) {
|
||||
return &versioning.Versioning{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/"}, meta.Created, nil
|
||||
}
|
||||
return &versioning.Versioning{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/"}, time.Time{}, err
|
||||
}
|
||||
return meta.versioningConfig, nil
|
||||
return meta.versioningConfig, meta.VersioningConfigUpdatedAt, nil
|
||||
}

// GetTaggingConfig returns configured tagging config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, error) {
func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, time.Time, error) {
    meta, err := sys.GetConfig(GlobalContext, bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            return nil, BucketTaggingNotFound{Bucket: bucket}
            return nil, time.Time{}, BucketTaggingNotFound{Bucket: bucket}
        }
        return nil, err
        return nil, time.Time{}, err
    }
    if meta.taggingConfig == nil {
        return nil, BucketTaggingNotFound{Bucket: bucket}
        return nil, time.Time{}, BucketTaggingNotFound{Bucket: bucket}
    }
    return meta.taggingConfig, nil
    return meta.taggingConfig, meta.TaggingConfigUpdatedAt, nil
}

// GetObjectLockConfig returns configured object lock config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetObjectLockConfig(bucket string) (*objectlock.Config, error) {
func (sys *BucketMetadataSys) GetObjectLockConfig(bucket string) (*objectlock.Config, time.Time, error) {
    meta, err := sys.GetConfig(GlobalContext, bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            return nil, BucketObjectLockConfigNotFound{Bucket: bucket}
            return nil, time.Time{}, BucketObjectLockConfigNotFound{Bucket: bucket}
        }
        return nil, err
        return nil, time.Time{}, err
    }
    if meta.objectLockConfig == nil {
        return nil, BucketObjectLockConfigNotFound{Bucket: bucket}
        return nil, time.Time{}, BucketObjectLockConfigNotFound{Bucket: bucket}
    }
    return meta.objectLockConfig, nil
    return meta.objectLockConfig, meta.ObjectLockConfigUpdatedAt, nil
}

// GetLifecycleConfig returns configured lifecycle config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Lifecycle, error) {
    if globalIsGateway && globalGatewayName == NASBackendGateway {
        // Only needed in case of NAS gateway.
        objAPI := newObjectLayerFn()
        if objAPI == nil {
            return nil, errServerNotInitialized
        }
        meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
        if err != nil {
            return nil, err
        }
        if meta.lifecycleConfig == nil {
            return nil, BucketLifecycleNotFound{Bucket: bucket}
        }
        return meta.lifecycleConfig, nil
    }

    meta, err := sys.GetConfig(GlobalContext, bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
@@ -261,19 +243,6 @@ func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Life
// GetNotificationConfig returns configured notification config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Config, error) {
    if globalIsGateway && globalGatewayName == NASBackendGateway {
        // Only needed in case of NAS gateway.
        objAPI := newObjectLayerFn()
        if objAPI == nil {
            return nil, errServerNotInitialized
        }
        meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
        if err != nil {
            return nil, err
        }
        return meta.notificationConfig, nil
    }

    meta, err := sys.GetConfig(GlobalContext, bucket)
    if err != nil {
        return nil, err
@@ -283,69 +252,73 @@ func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Confi

// GetSSEConfig returns configured SSE config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEConfig, error) {
func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEConfig, time.Time, error) {
    meta, err := sys.GetConfig(GlobalContext, bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            return nil, BucketSSEConfigNotFound{Bucket: bucket}
            return nil, time.Time{}, BucketSSEConfigNotFound{Bucket: bucket}
        }
        return nil, err
        return nil, time.Time{}, err
    }
    if meta.sseConfig == nil {
        return nil, BucketSSEConfigNotFound{Bucket: bucket}
        return nil, time.Time{}, BucketSSEConfigNotFound{Bucket: bucket}
    }
    return meta.sseConfig, nil
    return meta.sseConfig, meta.EncryptionConfigUpdatedAt, nil
}

// CreatedAt returns the time of creation of bucket
func (sys *BucketMetadataSys) CreatedAt(bucket string) (time.Time, error) {
    meta, err := sys.GetConfig(GlobalContext, bucket)
    if err != nil {
        return time.Time{}, err
    }
    return meta.Created.UTC(), nil
}

// GetPolicyConfig returns configured bucket policy
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, error) {
    if globalIsGateway {
        objAPI := newObjectLayerFn()
        if objAPI == nil {
            return nil, errServerNotInitialized
        }
        return objAPI.GetBucketPolicy(GlobalContext, bucket)
    }

func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, time.Time, error) {
    meta, err := sys.GetConfig(GlobalContext, bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            return nil, BucketPolicyNotFound{Bucket: bucket}
            return nil, time.Time{}, BucketPolicyNotFound{Bucket: bucket}
        }
        return nil, err
        return nil, time.Time{}, err
    }
    if meta.policyConfig == nil {
        return nil, BucketPolicyNotFound{Bucket: bucket}
        return nil, time.Time{}, BucketPolicyNotFound{Bucket: bucket}
    }
    return meta.policyConfig, nil
    return meta.policyConfig, meta.PolicyConfigUpdatedAt, nil
}

// GetQuotaConfig returns configured bucket quota
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetQuotaConfig(ctx context.Context, bucket string) (*madmin.BucketQuota, error) {
func (sys *BucketMetadataSys) GetQuotaConfig(ctx context.Context, bucket string) (*madmin.BucketQuota, time.Time, error) {
    meta, err := sys.GetConfig(ctx, bucket)
    if err != nil {
        return nil, err
        if errors.Is(err, errConfigNotFound) {
            return nil, time.Time{}, BucketQuotaConfigNotFound{Bucket: bucket}
        }
        return nil, time.Time{}, err
    }
    return meta.quotaConfig, nil
    return meta.quotaConfig, meta.QuotaConfigUpdatedAt, nil
}

// GetReplicationConfig returns configured bucket replication config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket string) (*replication.Config, error) {
func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket string) (*replication.Config, time.Time, error) {
    meta, err := sys.GetConfig(ctx, bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            return nil, BucketReplicationConfigNotFound{Bucket: bucket}
            return nil, time.Time{}, BucketReplicationConfigNotFound{Bucket: bucket}
        }
        return nil, err
        return nil, time.Time{}, err
    }

    if meta.replicationConfig == nil {
        return nil, BucketReplicationConfigNotFound{Bucket: bucket}
        return nil, time.Time{}, BucketReplicationConfigNotFound{Bucket: bucket}
    }
    return meta.replicationConfig, nil
    return meta.replicationConfig, meta.ReplicationConfigUpdatedAt, nil
}

// GetBucketTargetsConfig returns configured bucket targets for this bucket
@@ -353,6 +326,9 @@ func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket s
func (sys *BucketMetadataSys) GetBucketTargetsConfig(bucket string) (*madmin.BucketTargets, error) {
    meta, err := sys.GetConfig(GlobalContext, bucket)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            return nil, BucketRemoteTargetNotFound{Bucket: bucket}
        }
        return nil, err
    }
    if meta.bucketTargetConfig == nil {
@@ -361,20 +337,6 @@ func (sys *BucketMetadataSys) GetBucketTargetsConfig(bucket string) (*madmin.Buc
    return meta.bucketTargetConfig, nil
}

// GetBucketTarget returns the target for the bucket and arn.
func (sys *BucketMetadataSys) GetBucketTarget(bucket string, arn string) (madmin.BucketTarget, error) {
    targets, err := sys.GetBucketTargetsConfig(bucket)
    if err != nil {
        return madmin.BucketTarget{}, err
    }
    for _, t := range targets.Targets {
        if t.Arn == arn {
            return t, nil
        }
    }
    return madmin.BucketTarget{}, errConfigNotFound
}

// GetConfig returns a specific configuration from the bucket metadata.
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (BucketMetadata, error) {
@@ -383,11 +345,7 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (Buc
        return newBucketMetadata(bucket), errServerNotInitialized
    }

    if globalIsGateway {
        return newBucketMetadata(bucket), NotImplemented{}
    }

    if bucket == minioMetaBucket {
    if isMinioMetaBucketName(bucket) {
        return newBucketMetadata(bucket), errInvalidArgument
    }

@@ -414,12 +372,6 @@ func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, ob
        return errServerNotInitialized
    }

    // In gateway mode, we don't need to load bucket metadata except
    // NAS gateway backend.
    if globalIsGateway && !objAPI.IsNotificationSupported() {
        return nil
    }

    // Load bucket metadata sys in background
    go sys.load(ctx, buckets, objAPI)
    return nil
@@ -448,7 +400,7 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
        sys.metadataMap[buckets[index].Name] = meta
        sys.Unlock()

        globalNotificationSys.set(buckets[index], meta) // set notification targets
        globalEventNotifier.set(buckets[index], meta) // set notification targets

        globalBucketTargetSys.set(buckets[index], meta) // set remote replication targets


@@ -29,7 +29,7 @@ import (
    "path"
    "time"

    "github.com/minio/madmin-go"
    "github.com/minio/madmin-go/v2"
    "github.com/minio/minio-go/v7/pkg/tags"
    bucketsse "github.com/minio/minio/internal/bucket/encryption"
    "github.com/minio/minio/internal/bucket/lifecycle"
@@ -81,6 +81,13 @@ type BucketMetadata struct {
    ReplicationConfigXML        []byte
    BucketTargetsConfigJSON     []byte
    BucketTargetsConfigMetaJSON []byte
    PolicyConfigUpdatedAt       time.Time
    ObjectLockConfigUpdatedAt   time.Time
    EncryptionConfigUpdatedAt   time.Time
    TaggingConfigUpdatedAt      time.Time
    QuotaConfigUpdatedAt        time.Time
    ReplicationConfigUpdatedAt  time.Time
    VersioningConfigUpdatedAt   time.Time

    // Unexported fields. Must be updated atomically.
    policyConfig *policy.Policy
@@ -99,8 +106,7 @@ type BucketMetadata struct {
// newBucketMetadata creates BucketMetadata with the supplied name and Created to Now.
func newBucketMetadata(name string) BucketMetadata {
    return BucketMetadata{
        Name:    name,
        Created: UTCNow(),
        Name: name,
        notificationConfig: &event.Config{
            XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/",
        },
@@ -113,12 +119,23 @@ func newBucketMetadata(name string) BucketMetadata {
    }
}

// SetCreatedAt preserves the CreatedAt time for bucket across sites in site replication. It defaults to
// creation time of bucket on this cluster in all other cases.
func (b *BucketMetadata) SetCreatedAt(createdAt time.Time) {
    if b.Created.IsZero() {
        b.Created = UTCNow()
    }
    if !createdAt.IsZero() {
        b.Created = createdAt.UTC()
    }
}
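
SetCreatedAt exists so that site replication can stamp a bucket with the creation time recorded on the source site rather than the local clock; with a zero argument it only back-fills a missing Created. A short illustrative sketch (values hypothetical):

    b := newBucketMetadata("example-bucket")
    b.SetCreatedAt(time.Time{})                                 // zero input: Created is initialized to UTCNow()
    b.SetCreatedAt(time.Date(2021, 6, 1, 0, 0, 0, 0, time.UTC)) // non-zero input: Created becomes this UTC instant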

// Load - loads the metadata of bucket by name from ObjectLayer api.
// If an error is returned the returned metadata will be default initialized.
func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string) error {
    if name == "" {
        logger.LogIf(ctx, errors.New("bucket name cannot be empty"))
        return errors.New("bucket name cannot be empty")
        return errInvalidArgument
    }
    configFile := path.Join(bucketMetaPrefix, name, bucketMetadataFile)
    data, err := readConfig(ctx, api, configFile)
@@ -145,20 +162,45 @@ func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string)
    return err
}

// loadBucketMetadata loads and migrates to bucket metadata.
func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket string) (BucketMetadata, error) {
func loadBucketMetadataParse(ctx context.Context, objectAPI ObjectLayer, bucket string, parse bool) (BucketMetadata, error) {
    b := newBucketMetadata(bucket)
    err := b.Load(ctx, objectAPI, b.Name)
    if err != nil && !errors.Is(err, errConfigNotFound) {
        return b, err
    }
    if err == nil {
        b.defaultTimestamps()
    }

    // Old bucket without bucket metadata. Hence we migrate existing settings.
    if err := b.convertLegacyConfigs(ctx, objectAPI); err != nil {
    configs, err := b.getAllLegacyConfigs(ctx, objectAPI)
    if err != nil {
        return b, err
    }

    if len(configs) == 0 {
        if parse {
            // nothing to update, parse and proceed.
            err = b.parseAllConfigs(ctx, objectAPI)
        }
    } else {
        // Old bucket without bucket metadata. Hence we migrate existing settings.
        err = b.convertLegacyConfigs(ctx, objectAPI, configs)
    }
    if err != nil {
        return b, err
    }

    // migrate unencrypted remote targets
    return b, b.migrateTargetConfig(ctx, objectAPI)
    if err := b.migrateTargetConfig(ctx, objectAPI); err != nil {
        return b, err
    }

    return b, nil
}

// loadBucketMetadata loads and migrates to bucket metadata.
func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket string) (BucketMetadata, error) {
    return loadBucketMetadataParse(ctx, objectAPI, bucket, true)
}
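
The refactor splits discovery of legacy per-feature config files (getAllLegacyConfigs) from their migration (convertLegacyConfigs) and threads a parse flag through loadBucketMetadataParse, so callers that only need the raw serialized configs can skip XML/JSON parsing. A hedged sketch of the two entry points (all surrounding types are MinIO cmd-package internals):

    func loadBothForms(ctx context.Context, objAPI ObjectLayer, bucket string) error {
        // parse == true via the wrapper: the unexported, parsed config fields are populated.
        if _, err := loadBucketMetadata(ctx, objAPI, bucket); err != nil {
            return err
        }
        // parse == false: load and migrate the raw config payloads only.
        if _, err := loadBucketMetadataParse(ctx, objAPI, bucket, false); err != nil {
            return err
        }
        return nil
    }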

// parseAllConfigs will parse all configs and populate the private fields.
@@ -253,7 +295,7 @@ func (b *BucketMetadata) parseAllConfigs(ctx context.Context, objectAPI ObjectLa
    return nil
}

func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI ObjectLayer) error {
func (b *BucketMetadata) getAllLegacyConfigs(ctx context.Context, objectAPI ObjectLayer) (map[string][]byte, error) {
    legacyConfigs := []string{
        legacyBucketObjectLockEnabledConfigFile,
        bucketPolicyConfig,
@@ -267,7 +309,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
        objectLockConfig,
    }

    configs := make(map[string][]byte)
    configs := make(map[string][]byte, len(legacyConfigs))

    // Handle migration from lockEnabled to newer format.
    if b.LockEnabled {
@@ -292,16 +334,15 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
                continue
            }

            return err
            return nil, err
        }
        configs[legacyFile] = configData
    }

    if len(configs) == 0 {
        // nothing to update, return right away.
        return b.parseAllConfigs(ctx, objectAPI)
    }
    return configs, nil
}

func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI ObjectLayer, configs map[string][]byte) error {
    for legacyFile, configData := range configs {
        switch legacyFile {
        case legacyBucketObjectLockEnabledConfigFile:
@@ -332,6 +373,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
            b.BucketTargetsConfigJSON = configData
        }
    }
    b.defaultTimestamps()

    if err := b.Save(ctx, objectAPI); err != nil {
        return err
@@ -347,6 +389,37 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
    return nil
}

// default timestamps to metadata Created timestamp if unset.
func (b *BucketMetadata) defaultTimestamps() {
    if b.PolicyConfigUpdatedAt.IsZero() {
        b.PolicyConfigUpdatedAt = b.Created
    }

    if b.EncryptionConfigUpdatedAt.IsZero() {
        b.EncryptionConfigUpdatedAt = b.Created
    }

    if b.TaggingConfigUpdatedAt.IsZero() {
        b.TaggingConfigUpdatedAt = b.Created
    }

    if b.ObjectLockConfigUpdatedAt.IsZero() {
        b.ObjectLockConfigUpdatedAt = b.Created
    }

    if b.QuotaConfigUpdatedAt.IsZero() {
        b.QuotaConfigUpdatedAt = b.Created
    }

    if b.ReplicationConfigUpdatedAt.IsZero() {
        b.ReplicationConfigUpdatedAt = b.Created
    }

    if b.VersioningConfigUpdatedAt.IsZero() {
        b.VersioningConfigUpdatedAt = b.Created
    }
}
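
defaultTimestamps back-fills the seven new *ConfigUpdatedAt fields for metadata written by releases that predate them, using the bucket's creation time as a deterministic baseline; per the code above it runs after a successful Load and again during legacy migration. A test-style sketch of that behavior (assuming the cmd-package internals above):

    func TestDefaultTimestamps(t *testing.T) {
        b := newBucketMetadata("bucket")
        b.Created = time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC)
        b.defaultTimestamps()
        if !b.PolicyConfigUpdatedAt.Equal(b.Created) {
            t.Fatalf("PolicyConfigUpdatedAt = %v, want %v", b.PolicyConfigUpdatedAt, b.Created)
        }
    }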

// Save config to supplied ObjectLayer api.
func (b *BucketMetadata) Save(ctx context.Context, api ObjectLayer) error {
    if err := b.parseAllConfigs(ctx, api); err != nil {
@@ -369,23 +442,6 @@ func (b *BucketMetadata) Save(ctx context.Context, api ObjectLayer) error {
    return saveConfig(ctx, api, configFile, data)
}

// deleteBucketMetadata deletes bucket metadata
// If config does not exist no error is returned.
func deleteBucketMetadata(ctx context.Context, obj objectDeleter, bucket string) error {
    metadataFiles := []string{
        dataUsageCacheName,
        bucketMetadataFile,
        path.Join(replicationDir, resyncFileName),
    }
    for _, metaFile := range metadataFiles {
        configFile := path.Join(bucketMetaPrefix, bucket, metaFile)
        if err := deleteConfig(ctx, obj, configFile); err != nil && err != errConfigNotFound {
            return err
        }
    }
    return nil
}

// migrate config for remote targets by encrypting data if currently unencrypted and kms is configured.
func (b *BucketMetadata) migrateTargetConfig(ctx context.Context, objectAPI ObjectLayer) error {
    var err error
@@ -394,7 +450,7 @@ func (b *BucketMetadata) migrateTargetConfig(ctx context.Context, objectAPI Obje
        return nil
    }

    encBytes, metaBytes, err := encryptBucketMetadata(b.Name, b.BucketTargetsConfigJSON, kms.Context{b.Name: b.Name, bucketTargetsFile: bucketTargetsFile})
    encBytes, metaBytes, err := encryptBucketMetadata(ctx, b.Name, b.BucketTargetsConfigJSON, kms.Context{b.Name: b.Name, bucketTargetsFile: bucketTargetsFile})
    if err != nil {
        return err
    }
@@ -405,14 +461,14 @@ func (b *BucketMetadata) migrateTargetConfig(ctx context.Context, objectAPI Obje
}

// encrypt bucket metadata if kms is configured.
func encryptBucketMetadata(bucket string, input []byte, kmsContext kms.Context) (output, metabytes []byte, err error) {
func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kmsContext kms.Context) (output, metabytes []byte, err error) {
    if GlobalKMS == nil {
        output = input
        return
    }

    metadata := make(map[string]string)
    key, err := GlobalKMS.GenerateKey("", kmsContext)
    key, err := GlobalKMS.GenerateKey(ctx, "", kmsContext)
    if err != nil {
        return
    }
@@ -421,7 +477,7 @@ func encryptBucketMetadata(bucket string, input []byte, kmsContext kms.Context)
    objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader)
    sealedKey := objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, "")
    crypto.S3.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey)
    _, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.CipherSuitesDARE()})
    _, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
    if err != nil {
        return output, metabytes, err
    }
@@ -451,6 +507,6 @@ func decryptBucketMetadata(input []byte, bucket string, meta map[string]string,
    }

    outbuf := bytes.NewBuffer(nil)
    _, err = sio.Decrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.CipherSuitesDARE()})
    _, err = sio.Decrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
    return outbuf.Bytes(), err
}
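
Both helpers stream through github.com/minio/sio (the DARE format); the fips.CipherSuitesDARE() to fips.DARECiphers() change tracks a rename of MinIO's internal fips helper, and the added ctx is only threaded into the KMS call. A self-contained round-trip using sio directly, with a fixed key standing in for the KMS-derived object key:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/minio/sio"
    )

    func main() {
        key := bytes.Repeat([]byte{0x42}, 32) // placeholder; MinIO derives this key via the KMS
        plaintext := []byte(`{"targets":[]}`)

        var sealed bytes.Buffer
        if _, err := sio.Encrypt(&sealed, bytes.NewReader(plaintext), sio.Config{Key: key, MinVersion: sio.Version20}); err != nil {
            panic(err)
        }
        var opened bytes.Buffer
        if _, err := sio.Decrypt(&opened, &sealed, sio.Config{Key: key, MinVersion: sio.Version20}); err != nil {
            panic(err)
        }
        fmt.Println(bytes.Equal(opened.Bytes(), plaintext)) // prints true
    }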

@@ -108,6 +108,48 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {
                err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON")
                return
            }
        case "PolicyConfigUpdatedAt":
            z.PolicyConfigUpdatedAt, err = dc.ReadTime()
            if err != nil {
                err = msgp.WrapError(err, "PolicyConfigUpdatedAt")
                return
            }
        case "ObjectLockConfigUpdatedAt":
            z.ObjectLockConfigUpdatedAt, err = dc.ReadTime()
            if err != nil {
                err = msgp.WrapError(err, "ObjectLockConfigUpdatedAt")
                return
            }
        case "EncryptionConfigUpdatedAt":
            z.EncryptionConfigUpdatedAt, err = dc.ReadTime()
            if err != nil {
                err = msgp.WrapError(err, "EncryptionConfigUpdatedAt")
                return
            }
        case "TaggingConfigUpdatedAt":
            z.TaggingConfigUpdatedAt, err = dc.ReadTime()
            if err != nil {
                err = msgp.WrapError(err, "TaggingConfigUpdatedAt")
                return
            }
        case "QuotaConfigUpdatedAt":
            z.QuotaConfigUpdatedAt, err = dc.ReadTime()
            if err != nil {
                err = msgp.WrapError(err, "QuotaConfigUpdatedAt")
                return
            }
        case "ReplicationConfigUpdatedAt":
            z.ReplicationConfigUpdatedAt, err = dc.ReadTime()
            if err != nil {
                err = msgp.WrapError(err, "ReplicationConfigUpdatedAt")
                return
            }
        case "VersioningConfigUpdatedAt":
            z.VersioningConfigUpdatedAt, err = dc.ReadTime()
            if err != nil {
                err = msgp.WrapError(err, "VersioningConfigUpdatedAt")
                return
            }
        default:
            err = dc.Skip()
            if err != nil {
@@ -121,9 +163,9 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {

// EncodeMsg implements msgp.Encodable
func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
    // map header, size 14
    // map header, size 21
    // write "Name"
    err = en.Append(0x8e, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
    err = en.Append(0xde, 0x0, 0x15, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
    if err != nil {
        return
    }
@@ -262,15 +304,85 @@ func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
        err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON")
        return
    }
    // write "PolicyConfigUpdatedAt"
    err = en.Append(0xb5, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    if err != nil {
        return
    }
    err = en.WriteTime(z.PolicyConfigUpdatedAt)
    if err != nil {
        err = msgp.WrapError(err, "PolicyConfigUpdatedAt")
        return
    }
    // write "ObjectLockConfigUpdatedAt"
    err = en.Append(0xb9, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    if err != nil {
        return
    }
    err = en.WriteTime(z.ObjectLockConfigUpdatedAt)
    if err != nil {
        err = msgp.WrapError(err, "ObjectLockConfigUpdatedAt")
        return
    }
    // write "EncryptionConfigUpdatedAt"
    err = en.Append(0xb9, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    if err != nil {
        return
    }
    err = en.WriteTime(z.EncryptionConfigUpdatedAt)
    if err != nil {
        err = msgp.WrapError(err, "EncryptionConfigUpdatedAt")
        return
    }
    // write "TaggingConfigUpdatedAt"
    err = en.Append(0xb6, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    if err != nil {
        return
    }
    err = en.WriteTime(z.TaggingConfigUpdatedAt)
    if err != nil {
        err = msgp.WrapError(err, "TaggingConfigUpdatedAt")
        return
    }
    // write "QuotaConfigUpdatedAt"
    err = en.Append(0xb4, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    if err != nil {
        return
    }
    err = en.WriteTime(z.QuotaConfigUpdatedAt)
    if err != nil {
        err = msgp.WrapError(err, "QuotaConfigUpdatedAt")
        return
    }
    // write "ReplicationConfigUpdatedAt"
    err = en.Append(0xba, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    if err != nil {
        return
    }
    err = en.WriteTime(z.ReplicationConfigUpdatedAt)
    if err != nil {
        err = msgp.WrapError(err, "ReplicationConfigUpdatedAt")
        return
    }
    // write "VersioningConfigUpdatedAt"
    err = en.Append(0xb9, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    if err != nil {
        return
    }
    err = en.WriteTime(z.VersioningConfigUpdatedAt)
    if err != nil {
        err = msgp.WrapError(err, "VersioningConfigUpdatedAt")
        return
    }
    return
}

// MarshalMsg implements msgp.Marshaler
func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
    o = msgp.Require(b, z.Msgsize())
    // map header, size 14
    // map header, size 21
    // string "Name"
    o = append(o, 0x8e, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
    o = append(o, 0xde, 0x0, 0x15, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
    o = msgp.AppendString(o, z.Name)
    // string "Created"
    o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
@@ -311,6 +423,27 @@ func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
    // string "BucketTargetsConfigMetaJSON"
    o = append(o, 0xbb, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x4a, 0x53, 0x4f, 0x4e)
    o = msgp.AppendBytes(o, z.BucketTargetsConfigMetaJSON)
    // string "PolicyConfigUpdatedAt"
    o = append(o, 0xb5, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    o = msgp.AppendTime(o, z.PolicyConfigUpdatedAt)
    // string "ObjectLockConfigUpdatedAt"
    o = append(o, 0xb9, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    o = msgp.AppendTime(o, z.ObjectLockConfigUpdatedAt)
    // string "EncryptionConfigUpdatedAt"
    o = append(o, 0xb9, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    o = msgp.AppendTime(o, z.EncryptionConfigUpdatedAt)
    // string "TaggingConfigUpdatedAt"
    o = append(o, 0xb6, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    o = msgp.AppendTime(o, z.TaggingConfigUpdatedAt)
    // string "QuotaConfigUpdatedAt"
    o = append(o, 0xb4, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    o = msgp.AppendTime(o, z.QuotaConfigUpdatedAt)
    // string "ReplicationConfigUpdatedAt"
    o = append(o, 0xba, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    o = msgp.AppendTime(o, z.ReplicationConfigUpdatedAt)
    // string "VersioningConfigUpdatedAt"
    o = append(o, 0xb9, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
    o = msgp.AppendTime(o, z.VersioningConfigUpdatedAt)
    return
}

@@ -416,6 +549,48 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {
                err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON")
                return
            }
        case "PolicyConfigUpdatedAt":
            z.PolicyConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
            if err != nil {
                err = msgp.WrapError(err, "PolicyConfigUpdatedAt")
                return
            }
        case "ObjectLockConfigUpdatedAt":
            z.ObjectLockConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
            if err != nil {
                err = msgp.WrapError(err, "ObjectLockConfigUpdatedAt")
                return
            }
        case "EncryptionConfigUpdatedAt":
            z.EncryptionConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
            if err != nil {
                err = msgp.WrapError(err, "EncryptionConfigUpdatedAt")
                return
            }
        case "TaggingConfigUpdatedAt":
            z.TaggingConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
            if err != nil {
                err = msgp.WrapError(err, "TaggingConfigUpdatedAt")
                return
            }
        case "QuotaConfigUpdatedAt":
            z.QuotaConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
            if err != nil {
                err = msgp.WrapError(err, "QuotaConfigUpdatedAt")
                return
            }
        case "ReplicationConfigUpdatedAt":
            z.ReplicationConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
            if err != nil {
                err = msgp.WrapError(err, "ReplicationConfigUpdatedAt")
                return
            }
        case "VersioningConfigUpdatedAt":
            z.VersioningConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
            if err != nil {
                err = msgp.WrapError(err, "VersioningConfigUpdatedAt")
                return
            }
        default:
            bts, err = msgp.Skip(bts)
            if err != nil {
@@ -430,6 +605,6 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketMetadata) Msgsize() (s int) {
    s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + 21 + msgp.BytesPrefixSize + len(z.ReplicationConfigXML) + 24 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigJSON) + 28 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigMetaJSON)
    s = 3 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + 21 + msgp.BytesPrefixSize + len(z.ReplicationConfigXML) + 24 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigJSON) + 28 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigMetaJSON) + 22 + msgp.TimeSize + 26 + msgp.TimeSize + 26 + msgp.TimeSize + 23 + msgp.TimeSize + 21 + msgp.TimeSize + 27 + msgp.TimeSize + 26 + msgp.TimeSize
    return
}
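
The header bytes above are plain MessagePack framing: a map with up to 15 entries fits a one-byte fixmap header (0x80|14 = 0x8e, hence the old Msgsize starting at 1), while 21 entries needs map16 — 0xde followed by the big-endian count 0x00 0x15 — hence the new Msgsize starting at 3. Each added key is a fixstr costing one prefix byte plus its length, which is where terms like 22 + msgp.TimeSize come from. A small verification snippet:

    package main

    import "fmt"

    func main() {
        fmt.Printf("%#x\n", 0x80|14)                  // 0x8e: fixmap header for 14 entries
        fmt.Printf("%#x\n", 0x15)                     // 21: count carried in the two bytes after 0xde
        fmt.Println(1 + len("PolicyConfigUpdatedAt")) // 22: fixstr prefix byte + key bytes
    }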

@@ -60,7 +60,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
        return
    }

    _, err := objAPI.GetBucketInfo(ctx, bucketName)
    _, err := objAPI.GetBucketInfo(ctx, bucketName, BucketOptions{})
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
@@ -72,7 +72,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
        return
    }
    config.SetRegion(globalSite.Region)
    if err = config.Validate(globalSite.Region, globalNotificationSys.targetList); err != nil {
    if err = config.Validate(globalSite.Region, globalEventNotifier.targetList); err != nil {
        arnErr, ok := err.(*event.ErrARNNotFound)
        if ok {
            for i, queue := range config.QueueList {
@@ -132,7 +132,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
        return
    }

    _, err := objectAPI.GetBucketInfo(ctx, bucketName)
    _, err := objectAPI.GetBucketInfo(ctx, bucketName, BucketOptions{})
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
@@ -144,7 +144,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
        return
    }

    config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalSite.Region, globalNotificationSys.targetList)
    config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalSite.Region, globalEventNotifier.targetList)
    if err != nil {
        apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
        if event.IsEventError(err) {
@@ -160,13 +160,13 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
        return
    }

    if err = globalBucketMetadataSys.Update(ctx, bucketName, bucketNotificationConfig, configData); err != nil {
    if _, err = globalBucketMetadataSys.Update(ctx, bucketName, bucketNotificationConfig, configData); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    rulesMap := config.ToRulesMap()
    globalNotificationSys.AddRulesMap(bucketName, rulesMap)
    globalEventNotifier.AddRulesMap(bucketName, rulesMap)

    writeSuccessResponseHeadersOnly(w)
}

@@ -19,6 +19,7 @@ package cmd

import (
    "context"
    "errors"
    "math"
    "net/http"

@@ -35,22 +36,16 @@ type BucketObjectLockSys struct{}

// Get - Get retention configuration.
func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention, err error) {
    if globalIsGateway {
        objAPI := newObjectLayerFn()
        if objAPI == nil {
            return r, errServerNotInitialized
        }

        return r, nil
    }

    config, err := globalBucketMetadataSys.GetObjectLockConfig(bucketName)
    config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucketName)
    if err != nil {
        if _, ok := err.(BucketObjectLockConfigNotFound); ok {
        if errors.Is(err, BucketObjectLockConfigNotFound{Bucket: bucketName}) {
            return r, nil
        }
        if errors.Is(err, errInvalidArgument) {
            return r, err
        }
        logger.CriticalIf(context.Background(), err)
        return r, err

    }
    return config.ToRetention(), nil
}
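
The switch from a type assertion to errors.Is works because BucketObjectLockConfigNotFound is a comparable struct: errors.Is compares with == at each unwrapping step, so the target must carry the same Bucket value. A minimal, self-contained illustration with a hypothetical NotFound type standing in for the real error:

    package main

    import (
        "errors"
        "fmt"
    )

    type NotFound struct{ Bucket string }

    func (e NotFound) Error() string { return "config not found: " + e.Bucket }

    func main() {
        err := fmt.Errorf("loading metadata: %w", NotFound{Bucket: "photos"})
        fmt.Println(errors.Is(err, NotFound{Bucket: "photos"})) // true: matched by ==
        fmt.Println(errors.Is(err, NotFound{Bucket: "other"}))  // false: field values differ
    }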

@@ -21,12 +21,11 @@ import (
    "bytes"
    "encoding/json"
    "io"
    "io/ioutil"
    "net/http"

    humanize "github.com/dustin/go-humanize"
    "github.com/gorilla/mux"
    "github.com/minio/madmin-go"
    "github.com/minio/madmin-go/v2"
    "github.com/minio/minio/internal/logger"
    "github.com/minio/pkg/bucket/policy"
)
@@ -61,7 +60,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
    }

    // Check if bucket exists.
    if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
    if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
@@ -79,7 +78,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
        return
    }

    bucketPolicyBytes, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength))
    bucketPolicyBytes, err := io.ReadAll(io.LimitReader(r.Body, r.ContentLength))
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
@@ -93,7 +92,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht

    // Version in policy must not be empty
    if bucketPolicy.Version == "" {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL)
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPolicyInvalidVersion), r.URL)
        return
    }

@@ -103,16 +102,18 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
        return
    }

    if err = globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, configData); err != nil {
    updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, configData)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    // Call site replication hook.
    if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
        Type:   madmin.SRBucketMetaTypePolicy,
        Bucket: bucket,
        Policy: bucketPolicyBytes,
        Type:      madmin.SRBucketMetaTypePolicy,
        Bucket:    bucket,
        Policy:    bucketPolicyBytes,
        UpdatedAt: updatedAt,
    }); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
@@ -143,20 +144,22 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
    }

    // Check if bucket exists.
    if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
    if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    if err := globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, nil); err != nil {
    updatedAt, err := globalBucketMetadataSys.Delete(ctx, bucket, bucketPolicyConfig)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

    // Call site replication hook.
    if err := globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
        Type:   madmin.SRBucketMetaTypePolicy,
        Bucket: bucket,
        Type:      madmin.SRBucketMetaTypePolicy,
        Bucket:    bucket,
        UpdatedAt: updatedAt,
    }); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
@@ -187,7 +190,7 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
    }

    // Check if bucket exists.
    if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
    if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }

@@ -21,7 +21,6 @@ import (
    "bytes"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "net/http/httptest"
    "reflect"
@@ -108,12 +107,13 @@ func getAnonWriteOnlyObjectPolicy(bucketName, prefix string) *policy.Policy {

// Wrapper for calling Create Bucket and ensure we get one and only one success.
func TestCreateBucket(t *testing.T) {
    ExecObjectLayerAPITest(t, testCreateBucket, []string{"MakeBucketWithLocation"})
    ExecObjectLayerAPITest(t, testCreateBucket, []string{"MakeBucket"})
}

// testCreateBucket - Test for calling Create Bucket and ensure we get one and only one success.
func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
    credentials auth.Credentials, t *testing.T) {
    credentials auth.Credentials, t *testing.T,
) {
    bucketName1 := fmt.Sprintf("%s-1", bucketName)

    const n = 100
@@ -127,7 +127,7 @@ func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRoute
            defer wg.Done()
            // Sync start.
            <-start
            if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, BucketOptions{}); err != nil {
            if err := obj.MakeBucket(GlobalContext, bucketName1, MakeBucketOptions{}); err != nil {
                if _, ok := err.(BucketExists); !ok {
                    t.Logf("unexpected error: %T: %v", err, err)
                    return
@@ -162,7 +162,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
    credentials auth.Credentials, t *testing.T,
) {
    bucketName1 := fmt.Sprintf("%s-1", bucketName)
    if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, BucketOptions{}); err != nil {
    if err := obj.MakeBucket(GlobalContext, bucketName1, MakeBucketOptions{}); err != nil {
        t.Fatal(err)
    }

@@ -378,7 +378,8 @@ func TestGetBucketPolicyHandler(t *testing.T) {

// testGetBucketPolicyHandler - Test for end point which fetches the access policy json of the given bucket.
func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
    credentials auth.Credentials, t *testing.T) {
    credentials auth.Credentials, t *testing.T,
) {
    // template for constructing HTTP request body for PUT bucket policy.
    bucketPolicyTemplate := `{"Version":"2012-10-17","Statement":[{"Action":["s3:GetBucketLocation","s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s"]},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s/this*"]}]}`

@@ -485,7 +486,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
        t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, recV4.Code)
    }
    // read the response body.
    bucketPolicyReadBuf, err := ioutil.ReadAll(recV4.Body)
    bucketPolicyReadBuf, err := io.ReadAll(recV4.Body)
    if err != nil {
        t.Fatalf("Test %d: %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
    }
@@ -523,7 +524,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
        t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, recV2.Code)
    }
    // read the response body.
    bucketPolicyReadBuf, err = ioutil.ReadAll(recV2.Body)
    bucketPolicyReadBuf, err = io.ReadAll(recV2.Body)
    if err != nil {
        t.Fatalf("Test %d: %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
    }

@@ -27,6 +27,7 @@ import (

    jsoniter "github.com/json-iterator/go"
    miniogopolicy "github.com/minio/minio-go/v7/pkg/policy"
    "github.com/minio/minio-go/v7/pkg/tags"
    "github.com/minio/minio/internal/handlers"
    xhttp "github.com/minio/minio/internal/http"
    "github.com/minio/minio/internal/logger"
@@ -38,7 +39,8 @@ type PolicySys struct{}

// Get returns stored bucket policy
func (sys *PolicySys) Get(bucket string) (*policy.Policy, error) {
    return globalBucketMetadataSys.GetPolicyConfig(bucket)
    policy, _, err := globalBucketMetadataSys.GetPolicyConfig(bucket)
    return policy, err
}

// IsAllowed - checks given policy args is allowed to continue the Rest API.
@@ -124,6 +126,20 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[

    cloneHeader := r.Header.Clone()

    if userTags := cloneHeader.Get(xhttp.AmzObjectTagging); userTags != "" {
        tag, _ := tags.ParseObjectTags(userTags)
        if tag != nil {
            tagMap := tag.ToMap()
            keys := make([]string, 0, len(tagMap))
            for k, v := range tagMap {
                args[pathJoin("ExistingObjectTag", k)] = []string{v}
                args[pathJoin("RequestObjectTag", k)] = []string{v}
                keys = append(keys, k)
            }
            args["RequestObjectTagKeys"] = keys
        }
    }

    for _, objLock := range []string{
        xhttp.AmzObjectLockMode,
        xhttp.AmzObjectLockLegalHold,
@@ -136,6 +152,9 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
    }

    for key, values := range cloneHeader {
        if strings.EqualFold(key, xhttp.AmzObjectTagging) {
            continue
        }
        if existingValues, found := args[key]; found {
            args[key] = append(existingValues, values...)
        } else {
@@ -168,6 +187,8 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
    }

    // JWT specific values
    //
    // Add all string claims
    for k, v := range claims {
        vStr, ok := v.(string)
        if ok {
@@ -182,6 +203,21 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
            }
        }
    }
    // Add groups claim which could be a list. This will ensure that the claim
    // `jwt:groups` works.
    if grpsVal, ok := claims["groups"]; ok {
        if grpsIs, ok := grpsVal.([]interface{}); ok {
            grps := []string{}
            for _, gI := range grpsIs {
                if g, ok := gI.(string); ok {
                    grps = append(grps, g)
                }
            }
            if len(grps) > 0 {
                args["groups"] = grps
            }
        }
    }

    return args
}
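
The added condition values expose object tags and JWT group claims to policy evaluation: ExistingObjectTag/<key> and RequestObjectTag/<key> mirror the corresponding S3 condition keys, and the "groups" entry backs the jwt:groups claim named in the comment above. An illustrative condition block that such values could satisfy (bucket name and values are hypothetical, and which policy types honor each key is MinIO-version specific):

    const exampleConditions = `{
        "Condition": {
            "StringEquals": {
                "s3:ExistingObjectTag/team": "analytics",
                "jwt:groups": "data-readers"
            }
        }
    }`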

Some files were not shown because too many files have changed in this diff.